1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2016 Jakub Klama <jceel@FreeBSD.org>.
5 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>.
6 * Copyright (c) 2026 Hans Rosenfeld
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer
14 * in this position and unchanged.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/param.h>
33 #include <sys/linker_set.h>
34 #include <sys/types.h>
35 #include <sys/uio.h>
36 #include <sys/time.h>
37 #include <sys/queue.h>
38 #include <sys/sbuf.h>
39
40 #include <errno.h>
41 #include <fcntl.h>
42 #include <stdio.h>
43 #include <stdlib.h>
44 #include <stdbool.h>
45 #include <string.h>
46 #include <unistd.h>
47 #include <assert.h>
48 #include <pthread.h>
49 #include <pthread_np.h>
50
51 #include <cam/scsi/scsi_all.h>
52 #include <cam/scsi/scsi_message.h>
53 #include <cam/ctl/ctl.h>
54 #include <cam/ctl/ctl_io.h>
55 #include <cam/ctl/ctl_backend.h>
56 #include <cam/ctl/ctl_ioctl.h>
57 #include <cam/ctl/ctl_util.h>
58 #include <cam/ctl/ctl_scsi_all.h>
59 #include <camlib.h>
60
61 #include "bhyverun.h"
62 #include "config.h"
63 #include "debug.h"
64 #include "pci_emul.h"
65 #include "virtio.h"
66 #include "iov.h"
67
68 #define VTSCSI_RINGSZ 64
69 #define VTSCSI_REQUESTQ 1
70 #define VTSCSI_THR_PER_Q 16
71 #define VTSCSI_MAXQ (VTSCSI_REQUESTQ + 2)
72 #define VTSCSI_MAXSEG 64
73
74 #define VTSCSI_IN_HEADER_LEN(_sc) \
75 (sizeof(struct pci_vtscsi_req_cmd_rd) + _sc->vss_config.cdb_size)
76
77 #define VTSCSI_OUT_HEADER_LEN(_sc) \
78 (sizeof(struct pci_vtscsi_req_cmd_wr) + _sc->vss_config.sense_size)
79
80 #define VIRTIO_SCSI_MAX_CHANNEL 0
81 #define VIRTIO_SCSI_MAX_TARGET 0
82 #define VIRTIO_SCSI_MAX_LUN 16383
83
84 #define VIRTIO_SCSI_F_INOUT (1 << 0)
85 #define VIRTIO_SCSI_F_HOTPLUG (1 << 1)
86 #define VIRTIO_SCSI_F_CHANGE (1 << 2)
87
88 static int pci_vtscsi_debug = 0;
89 #define WPRINTF(msg, params...) PRINTLN("virtio-scsi: " msg, ##params)
90 #define DPRINTF(msg, params...) if (pci_vtscsi_debug) WPRINTF(msg, ##params)
91
/*
 * Device config space layout as seen by the guest.  Read via
 * pci_vtscsi_cfgread(); (re-)initialized to defaults in pci_vtscsi_reset().
 * Writes are currently ignored (see pci_vtscsi_cfgwrite()).
 */
struct pci_vtscsi_config {
	uint32_t num_queues;		/* number of I/O request queues */
	uint32_t seg_max;		/* max data segments per request */
	uint32_t max_sectors;		/* hint: max transfer size in sectors */
	uint32_t cmd_per_lun;		/* hint: max commands per LUN */
	uint32_t event_info_size;	/* size of an event queue buffer */
	uint32_t sense_size;		/* sense buffer size (bytes) */
	uint32_t cdb_size;		/* CDB size (bytes) */
	uint16_t max_channel;
	uint16_t max_target;
	uint32_t max_lun;
} __attribute__((packed));
104
105 /*
106 * I/O request state and I/O request queues
107 *
108 * In addition to the control queue and notification queues, each virtio-scsi
 * device instance has at least one I/O request queue, the state of which
 * is kept in an array of struct pci_vtscsi_queue in the device softc.
111 *
112 * Currently there is only one I/O request queue, but it's trivial to support
113 * more than one.
114 *
115 * Each pci_vtscsi_queue has VTSCSI_RINGSZ pci_vtscsi_request structures pre-
116 * allocated on vsq_free_requests. For each I/O request coming in on the I/O
117 * virtqueue, the request queue handler will take a pci_vtscsi_request off
118 * vsq_free_requests, fills in the data from the I/O virtqueue, puts it on
119 * vsq_requests, and signals vsq_cv.
120 *
121 * There are VTSCSI_THR_PER_Q worker threads for each pci_vtscsi_queue which
122 * wait on vsq_cv. When signalled, they repeatedly take one pci_vtscsi_request
123 * off vsq_requests, construct a ctl_io for it, and hand it off to the CTL ioctl
124 * Interface, which processes it synchronously. After completion of the request,
125 * the pci_vtscsi_request is re-initialized and put back onto vsq_free_requests.
126 *
127 * The worker threads exit when vsq_cv is signalled after vsw_exiting was set.
128 *
129 * There are three mutexes to coordinate the accesses to an I/O request queue:
130 * - vsq_rmtx protects vsq_requests and must be held when waiting on vsq_cv
131 * - vsq_fmtx protects vsq_free_requests
132 * - vsq_qmtx must be held when operating on the underlying virtqueue, vsq_vq
133 */
/* A list of I/O requests, linked through vsr_link. */
STAILQ_HEAD(pci_vtscsi_req_queue, pci_vtscsi_request);

/* Per-I/O-request-queue state; see the big comment above for the design. */
struct pci_vtscsi_queue {
	struct pci_vtscsi_softc * vsq_sc;	/* backpointer to the device */
	struct vqueue_info * vsq_vq;		/* the underlying virtqueue */
	pthread_mutex_t vsq_rmtx;	/* protects vsq_requests; held for vsq_cv */
	pthread_mutex_t vsq_fmtx;	/* protects vsq_free_requests */
	pthread_mutex_t vsq_qmtx;	/* protects operations on vsq_vq */
	pthread_cond_t vsq_cv;		/* signalled on new work or exit */
	struct pci_vtscsi_req_queue vsq_requests;	/* pending requests */
	struct pci_vtscsi_req_queue vsq_free_requests;	/* free request pool */
	LIST_HEAD(, pci_vtscsi_worker) vsq_workers;	/* worker threads */
};
147
/* State of one worker thread servicing an I/O request queue. */
struct pci_vtscsi_worker {
	struct pci_vtscsi_queue * vsw_queue;	/* queue this worker serves */
	pthread_t vsw_thread;
	bool vsw_exiting;	/* ask the worker to exit; checked under vsq_rmtx */
	LIST_ENTRY(pci_vtscsi_worker) vsw_link;	/* linkage on vsq_workers */
};
154
/*
 * State of one I/O request.  VTSCSI_RINGSZ of these are pre-allocated per
 * queue (see pci_vtscsi_init_queue()) and recycled via vsq_free_requests.
 */
struct pci_vtscsi_request {
	struct pci_vtscsi_queue * vsr_queue;	/* owning queue */
	/* descriptor chain; extra room for iovecs added by split_iov() */
	struct iovec vsr_iov[VTSCSI_MAXSEG + SPLIT_IOV_ADDL_IOV];
	struct iovec * vsr_iov_in;	/* guest-readable section (header) */
	struct iovec * vsr_iov_out;	/* guest-writable section (header) */
	struct iovec * vsr_data_iov_in;	 /* guest-readable data payload */
	struct iovec * vsr_data_iov_out; /* guest-writable data payload */
	struct pci_vtscsi_req_cmd_rd * vsr_cmd_rd; /* linearized request hdr */
	struct pci_vtscsi_req_cmd_wr * vsr_cmd_wr; /* response hdr to write */
	union ctl_io * vsr_ctl_io;	/* pre-allocated CTL I/O descriptor */
	size_t vsr_niov_in;
	size_t vsr_niov_out;
	size_t vsr_data_niov_in;
	size_t vsr_data_niov_out;
	uint32_t vsr_idx;	/* virtqueue descriptor chain index */
	STAILQ_ENTRY(pci_vtscsi_request) vsr_link;
};
172
/*
 * Per-device softc
 */
struct pci_vtscsi_softc {
	struct virtio_softc vss_vs;		/* generic virtio state */
	struct vqueue_info vss_vq[VTSCSI_MAXQ];	/* control, event, I/O queues */
	struct pci_vtscsi_queue vss_queues[VTSCSI_REQUESTQ]; /* I/O queue state */
	pthread_mutex_t vss_mtx;
	int vss_iid;			/* CTL initiator id (nexus.initid) */
	int vss_ctl_fd;			/* fd used for the CTL_IO ioctl */
	uint32_t vss_features;		/* negotiated virtio feature bits */
	struct pci_vtscsi_config vss_config;	/* device config space */
};
186
187 #define VIRTIO_SCSI_T_TMF 0
188 #define VIRTIO_SCSI_T_TMF_ABORT_TASK 0
189 #define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1
190 #define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2
191 #define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3
192 #define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4
193 #define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5
194 #define VIRTIO_SCSI_T_TMF_QUERY_TASK 6
195 #define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7
196
197 /* command-specific response values */
198 #define VIRTIO_SCSI_S_FUNCTION_COMPLETE 0
199 #define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10
200 #define VIRTIO_SCSI_S_FUNCTION_REJECTED 11
201
/* Control queue request: task management function (TMF). */
struct pci_vtscsi_ctrl_tmf {
	const uint32_t type;	/* VIRTIO_SCSI_T_TMF */
	const uint32_t subtype;	/* VIRTIO_SCSI_T_TMF_* */
	const uint8_t lun[8];	/* LUN address, see parsing comment below */
	const uint64_t id;	/* tag of the task to operate on */
	uint8_t response;	/* written by the device */
} __attribute__((packed));
209
210 #define VIRTIO_SCSI_T_AN_QUERY 1
211 #define VIRTIO_SCSI_EVT_ASYNC_OPERATIONAL_CHANGE 2
212 #define VIRTIO_SCSI_EVT_ASYNC_POWER_MGMT 4
213 #define VIRTIO_SCSI_EVT_ASYNC_EXTERNAL_REQUEST 8
214 #define VIRTIO_SCSI_EVT_ASYNC_MEDIA_CHANGE 16
215 #define VIRTIO_SCSI_EVT_ASYNC_MULTI_HOST 32
216 #define VIRTIO_SCSI_EVT_ASYNC_DEVICE_BUSY 64
217
/* Control queue request: asynchronous notification (AN) query/subscribe. */
struct pci_vtscsi_ctrl_an {
	const uint32_t type;		/* VIRTIO_SCSI_T_AN_QUERY */
	const uint8_t lun[8];		/* LUN address */
	const uint32_t event_requested;	/* VIRTIO_SCSI_EVT_ASYNC_* mask */
	uint32_t event_actual;		/* written by the device */
	uint8_t response;		/* written by the device */
} __attribute__((packed));
225
226 /* command-specific response values */
227 #define VIRTIO_SCSI_S_OK 0
228 #define VIRTIO_SCSI_S_OVERRUN 1
229 #define VIRTIO_SCSI_S_ABORTED 2
230 #define VIRTIO_SCSI_S_BAD_TARGET 3
231 #define VIRTIO_SCSI_S_RESET 4
232 #define VIRTIO_SCSI_S_BUSY 5
233 #define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6
234 #define VIRTIO_SCSI_S_TARGET_FAILURE 7
235 #define VIRTIO_SCSI_S_NEXUS_FAILURE 8
236 #define VIRTIO_SCSI_S_FAILURE 9
237 #define VIRTIO_SCSI_S_INCORRECT_LUN 12
238
239 /* task_attr */
240 #define VIRTIO_SCSI_S_SIMPLE 0
241 #define VIRTIO_SCSI_S_ORDERED 1
242 #define VIRTIO_SCSI_S_HEAD 2
243 #define VIRTIO_SCSI_S_ACA 3
244
/* Layout of an event queue buffer (no events are currently reported). */
struct pci_vtscsi_event {
	uint32_t event;
	uint8_t lun[8];
	uint32_t reason;
} __attribute__((packed));
250
/* Request header preceding the CDB on the guest-readable part of a chain. */
struct pci_vtscsi_req_cmd_rd {
	const uint8_t lun[8];		/* LUN address, see comment below */
	const uint64_t id;		/* command tag */
	const uint8_t task_attr;	/* VIRTIO_SCSI_S_SIMPLE/ORDERED/... */
	const uint8_t prio;
	const uint8_t crn;
	const uint8_t cdb[];		/* vss_config.cdb_size bytes */
} __attribute__((packed));
259
/* Response header written back to the guest-writable part of a chain. */
struct pci_vtscsi_req_cmd_wr {
	uint32_t sense_len;		/* valid bytes in sense[] */
	uint32_t residual;		/* requested minus transferred bytes */
	uint16_t status_qualifier;
	uint8_t status;			/* SCSI status byte */
	uint8_t response;		/* VIRTIO_SCSI_S_* response code */
	uint8_t sense[];		/* vss_config.sense_size bytes */
} __attribute__((packed));
268
269 static void *pci_vtscsi_proc(void *);
270 static void pci_vtscsi_reset(void *);
271 static void pci_vtscsi_neg_features(void *, uint64_t);
272 static int pci_vtscsi_cfgread(void *, int, int, uint32_t *);
273 static int pci_vtscsi_cfgwrite(void *, int, int, uint32_t);
274
275 static inline bool pci_vtscsi_check_lun(const uint8_t *);
276 static inline int pci_vtscsi_get_lun(const uint8_t *);
277
278 static void pci_vtscsi_control_handle(struct pci_vtscsi_softc *, void *, size_t);
279 static void pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *,
280 struct pci_vtscsi_ctrl_tmf *);
281 static void pci_vtscsi_an_handle(struct pci_vtscsi_softc *,
282 struct pci_vtscsi_ctrl_an *);
283
284 static struct pci_vtscsi_request *pci_vtscsi_alloc_request(
285 struct pci_vtscsi_softc *);
286 static void pci_vtscsi_free_request(struct pci_vtscsi_request *);
287 static struct pci_vtscsi_request *pci_vtscsi_get_request(
288 struct pci_vtscsi_req_queue *);
289 static void pci_vtscsi_put_request(struct pci_vtscsi_req_queue *,
290 struct pci_vtscsi_request *);
291 static void pci_vtscsi_queue_request(struct pci_vtscsi_softc *,
292 struct vqueue_info *);
293 static void pci_vtscsi_return_request(struct pci_vtscsi_queue *,
294 struct pci_vtscsi_request *, int);
295 static int pci_vtscsi_request_handle(struct pci_vtscsi_softc *,
296 struct pci_vtscsi_request *);
297
298 static void pci_vtscsi_controlq_notify(void *, struct vqueue_info *);
299 static void pci_vtscsi_eventq_notify(void *, struct vqueue_info *);
300 static void pci_vtscsi_requestq_notify(void *, struct vqueue_info *);
301 static int pci_vtscsi_init_queue(struct pci_vtscsi_softc *,
302 struct pci_vtscsi_queue *, int);
303 static void pci_vtscsi_destroy_queue(struct pci_vtscsi_queue *);
304 static int pci_vtscsi_init(struct pci_devinst *, nvlist_t *);
305
/* Device-wide parameters and callbacks for the generic virtio layer. */
static struct virtio_consts vtscsi_vi_consts = {
	.vc_name = "vtscsi",
	.vc_nvq = VTSCSI_MAXQ,		/* control + event + request queues */
	.vc_cfgsize = sizeof(struct pci_vtscsi_config),
	.vc_reset = pci_vtscsi_reset,
	.vc_cfgread = pci_vtscsi_cfgread,
	.vc_cfgwrite = pci_vtscsi_cfgwrite,
	.vc_apply_features = pci_vtscsi_neg_features,
	.vc_hv_caps = VIRTIO_RING_F_INDIRECT_DESC,	/* offered to guest */
};
316
/*
 * Worker thread main loop.  Each I/O request queue runs VTSCSI_THR_PER_Q of
 * these.  A worker repeatedly takes one request off vsq_requests, processes
 * it synchronously through CTL, and completes it, until vsw_exiting is set
 * and vsq_cv is signalled.
 */
static void *
pci_vtscsi_proc(void *arg)
{
	struct pci_vtscsi_worker *worker = (struct pci_vtscsi_worker *)arg;
	struct pci_vtscsi_queue *q = worker->vsw_queue;
	struct pci_vtscsi_softc *sc = q->vsq_sc;
	int iolen;

	for (;;) {
		struct pci_vtscsi_request *req;

		pthread_mutex_lock(&q->vsq_rmtx);

		/* Sleep until there is work, or until asked to exit. */
		while (STAILQ_EMPTY(&q->vsq_requests) && !worker->vsw_exiting)
			pthread_cond_wait(&q->vsq_cv, &q->vsq_rmtx);

		if (worker->vsw_exiting) {
			pthread_mutex_unlock(&q->vsq_rmtx);
			return (NULL);
		}

		req = pci_vtscsi_get_request(&q->vsq_requests);
		pthread_mutex_unlock(&q->vsq_rmtx);

		DPRINTF("I/O request lun %d, data_niov_in %zu, data_niov_out "
		    "%zu", pci_vtscsi_get_lun(req->vsr_cmd_rd->lun),
		    req->vsr_data_niov_in, req->vsr_data_niov_out);

		/* Hand the request to CTL; this blocks until completion. */
		iolen = pci_vtscsi_request_handle(sc, req);

		pci_vtscsi_return_request(q, req, iolen);
	}
}
350
351 static void
pci_vtscsi_reset(void * vsc)352 pci_vtscsi_reset(void *vsc)
353 {
354 struct pci_vtscsi_softc *sc;
355
356 sc = vsc;
357
358 DPRINTF("device reset requested");
359 vi_reset_dev(&sc->vss_vs);
360
361 /* initialize config structure */
362 sc->vss_config = (struct pci_vtscsi_config){
363 .num_queues = VTSCSI_REQUESTQ,
364 /* Leave room for the request and the response. */
365 .seg_max = VTSCSI_MAXSEG - 2,
366 /* CTL apparently doesn't have a limit here */
367 .max_sectors = INT32_MAX,
368 .cmd_per_lun = 1,
369 .event_info_size = sizeof(struct pci_vtscsi_event),
370 .sense_size = 96,
371 .cdb_size = 32,
372 .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
373 .max_target = VIRTIO_SCSI_MAX_TARGET,
374 .max_lun = VIRTIO_SCSI_MAX_LUN
375 };
376 }
377
378 static void
pci_vtscsi_neg_features(void * vsc,uint64_t negotiated_features)379 pci_vtscsi_neg_features(void *vsc, uint64_t negotiated_features)
380 {
381 struct pci_vtscsi_softc *sc = vsc;
382
383 sc->vss_features = negotiated_features;
384 }
385
386 static int
pci_vtscsi_cfgread(void * vsc,int offset,int size,uint32_t * retval)387 pci_vtscsi_cfgread(void *vsc, int offset, int size, uint32_t *retval)
388 {
389 struct pci_vtscsi_softc *sc = vsc;
390 void *ptr;
391
392 ptr = (uint8_t *)&sc->vss_config + offset;
393 memcpy(retval, ptr, size);
394 return (0);
395 }
396
397 static int
pci_vtscsi_cfgwrite(void * vsc __unused,int offset __unused,int size __unused,uint32_t val __unused)398 pci_vtscsi_cfgwrite(void *vsc __unused, int offset __unused, int size __unused,
399 uint32_t val __unused)
400 {
401 return (0);
402 }
403
404 /*
405 * LUN address parsing
406 *
407 * The LUN address consists of 8 bytes. While the spec describes this as 0x01,
408 * followed by the target byte, followed by a "single-level LUN structure",
409 * this is actually the same as a hierarchical LUN address as defined by SAM-5,
410 * consisting of four levels of addressing, where in each level the two MSB of
411 * byte 0 select the address mode used in the remaining bits and bytes.
412 *
413 *
 * Only the first two levels are actually used by virtio-scsi:
415 *
416 * Level 1: 0x01, 0xTT: Peripheral Device Addressing: Bus 1, Target 0-255
417 * Level 2: 0xLL, 0xLL: Peripheral Device Addressing: Bus MBZ, LUN 0-255
418 * or: Flat Space Addressing: LUN (0-16383)
419 * Level 3 and 4: not used, MBZ
420 *
421 * Currently, we only support Target 0.
422 *
423 * Alternatively, the first level may contain an extended LUN address to select
424 * the REPORT_LUNS well-known logical unit:
425 *
 * Level 1: 0xC1, 0x01: Extended LUN Addressing, Well-Known LUN 1 (REPORT_LUNS)
427 * Level 2, 3, and 4: not used, MBZ
428 *
429 * The virtio spec says that we SHOULD implement the REPORT_LUNS well-known
430 * logical unit but we currently don't.
431 *
 * According to the virtio spec, these are the only LUN address formats to be
433 * used with virtio-scsi.
434 */
435
436 /*
437 * Check that the given LUN address conforms to the virtio spec, does not
438 * address an unknown target, and especially does not address the REPORT_LUNS
439 * well-known logical unit.
440 */
/*
 * Check that the given LUN address conforms to the virtio spec, does not
 * address an unknown target, and especially does not address the REPORT_LUNS
 * well-known logical unit.
 */
static inline bool
pci_vtscsi_check_lun(const uint8_t *lun)
{
	/*
	 * Reject extended LUN addressing (REPORT_LUNS well-known LU, which
	 * we don't implement) and anything but Peripheral Device Addressing
	 * of bus 1, target 0 in the first level.
	 */
	if (lun[0] == 0xC1 || lun[0] != 0x01 || lun[1] != 0x00)
		return (false);

	/*
	 * The second level must be either Peripheral Device Addressing
	 * (byte 2 == 0x00) or Flat Space Addressing (address mode 01 in
	 * the two MSB of byte 2).
	 */
	if (lun[2] != 0x00 && (lun[2] & 0xc0) != 0x40)
		return (false);

	/* Levels 3 and 4 must be zero. */
	return (lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0);
}
461
462 /*
463 * Get the LUN id from a LUN address.
464 *
465 * Every code path using this function must have called pci_vtscsi_check_lun()
466 * before to make sure the LUN address is valid.
467 */
/*
 * Get the LUN id from a LUN address.
 *
 * Every code path using this function must have called pci_vtscsi_check_lun()
 * before to make sure the LUN address is valid.
 */
static inline int
pci_vtscsi_get_lun(const uint8_t *lun)
{
	assert(lun[0] == 0x01);
	assert(lun[1] == 0x00);
	assert(lun[2] == 0x00 || (lun[2] & 0xc0) == 0x40);

	/* 14-bit LUN number taken from the low bits of bytes 2 and 3. */
	return (((lun[2] & 0x3f) << 8) | lun[3]);
}
477
478 static void
pci_vtscsi_control_handle(struct pci_vtscsi_softc * sc,void * buf,size_t bufsize)479 pci_vtscsi_control_handle(struct pci_vtscsi_softc *sc, void *buf,
480 size_t bufsize)
481 {
482 struct pci_vtscsi_ctrl_tmf *tmf;
483 struct pci_vtscsi_ctrl_an *an;
484 uint32_t type;
485
486 if (bufsize < sizeof(uint32_t)) {
487 WPRINTF("ignoring truncated control request");
488 return;
489 }
490
491 type = *(uint32_t *)buf;
492
493 if (type == VIRTIO_SCSI_T_TMF) {
494 if (bufsize != sizeof(*tmf)) {
495 WPRINTF("ignoring tmf request with size %zu", bufsize);
496 return;
497 }
498 tmf = (struct pci_vtscsi_ctrl_tmf *)buf;
499 pci_vtscsi_tmf_handle(sc, tmf);
500 } else if (type == VIRTIO_SCSI_T_AN_QUERY) {
501 if (bufsize != sizeof(*an)) {
502 WPRINTF("ignoring AN request with size %zu", bufsize);
503 return;
504 }
505 an = (struct pci_vtscsi_ctrl_an *)buf;
506 pci_vtscsi_an_handle(sc, an);
507 }
508 }
509
510 static void
pci_vtscsi_tmf_handle(struct pci_vtscsi_softc * sc,struct pci_vtscsi_ctrl_tmf * tmf)511 pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *sc,
512 struct pci_vtscsi_ctrl_tmf *tmf)
513 {
514 union ctl_io *io;
515 int err;
516
517 if (pci_vtscsi_check_lun(tmf->lun) == false) {
518 DPRINTF("TMF request to invalid LUN %.2hhx%.2hhx-%.2hhx%.2hhx-"
519 "%.2hhx%.2hhx-%.2hhx%.2hhx", tmf->lun[0], tmf->lun[1],
520 tmf->lun[2], tmf->lun[3], tmf->lun[4], tmf->lun[5],
521 tmf->lun[6], tmf->lun[7]);
522
523 tmf->response = VIRTIO_SCSI_S_BAD_TARGET;
524 return;
525 }
526
527 io = ctl_scsi_alloc_io(sc->vss_iid);
528 if (io == NULL) {
529 WPRINTF("failed to allocate ctl_io: err=%d (%s)",
530 errno, strerror(errno));
531
532 tmf->response = VIRTIO_SCSI_S_FAILURE;
533 return;
534 }
535
536 ctl_scsi_zero_io(io);
537
538 io->io_hdr.io_type = CTL_IO_TASK;
539 io->io_hdr.nexus.initid = sc->vss_iid;
540 io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(tmf->lun);
541 io->taskio.tag_type = CTL_TAG_SIMPLE;
542 io->taskio.tag_num = tmf->id;
543 io->io_hdr.flags |= CTL_FLAG_USER_TAG;
544
545 switch (tmf->subtype) {
546 case VIRTIO_SCSI_T_TMF_ABORT_TASK:
547 io->taskio.task_action = CTL_TASK_ABORT_TASK;
548 break;
549
550 case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
551 io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
552 break;
553
554 case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
555 io->taskio.task_action = CTL_TASK_CLEAR_ACA;
556 break;
557
558 case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
559 io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET;
560 break;
561
562 case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
563 io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
564 break;
565
566 case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
567 io->taskio.task_action = CTL_TASK_LUN_RESET;
568 break;
569
570 case VIRTIO_SCSI_T_TMF_QUERY_TASK:
571 io->taskio.task_action = CTL_TASK_QUERY_TASK;
572 break;
573
574 case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
575 io->taskio.task_action = CTL_TASK_QUERY_TASK_SET;
576 break;
577 }
578
579 if (pci_vtscsi_debug) {
580 struct sbuf *sb = sbuf_new_auto();
581 ctl_io_sbuf(io, sb);
582 sbuf_finish(sb);
583 DPRINTF("%s", sbuf_data(sb));
584 sbuf_delete(sb);
585 }
586
587 err = ioctl(sc->vss_ctl_fd, CTL_IO, io);
588 if (err != 0)
589 WPRINTF("CTL_IO: err=%d (%s)", errno, strerror(errno));
590
591 tmf->response = io->taskio.task_status;
592 ctl_scsi_free_io(io);
593 }
594
595 static void
pci_vtscsi_an_handle(struct pci_vtscsi_softc * sc __unused,struct pci_vtscsi_ctrl_an * an __unused)596 pci_vtscsi_an_handle(struct pci_vtscsi_softc *sc __unused,
597 struct pci_vtscsi_ctrl_an *an __unused)
598 {
599 }
600
/*
 * Allocate one pci_vtscsi_request together with its fixed-size command
 * header buffers and a CTL I/O descriptor.  Returns NULL on failure, with
 * any partial allocations released.
 */
static struct pci_vtscsi_request *
pci_vtscsi_alloc_request(struct pci_vtscsi_softc *sc)
{
	struct pci_vtscsi_request *req;

	req = calloc(1, sizeof(struct pci_vtscsi_request));
	if (req == NULL)
		goto fail;

	/* Header buffers are sized from the current cdb/sense config. */
	req->vsr_cmd_rd = calloc(1, VTSCSI_IN_HEADER_LEN(sc));
	if (req->vsr_cmd_rd == NULL)
		goto fail;
	req->vsr_cmd_wr = calloc(1, VTSCSI_OUT_HEADER_LEN(sc));
	if (req->vsr_cmd_wr == NULL)
		goto fail;

	req->vsr_ctl_io = ctl_scsi_alloc_io(sc->vss_iid);
	if (req->vsr_ctl_io == NULL)
		goto fail;
	ctl_scsi_zero_io(req->vsr_ctl_io);

	return (req);

fail:
	/* NOTE(review): assumes a failing ctl_scsi_alloc_io() sets errno. */
	EPRINTLN("failed to allocate request: %s", strerror(errno));

	/* pci_vtscsi_free_request() copes with partially allocated requests */
	if (req != NULL)
		pci_vtscsi_free_request(req);

	return (NULL);
}
632
633 static void
pci_vtscsi_free_request(struct pci_vtscsi_request * req)634 pci_vtscsi_free_request(struct pci_vtscsi_request *req)
635 {
636 if (req->vsr_ctl_io != NULL)
637 ctl_scsi_free_io(req->vsr_ctl_io);
638 if (req->vsr_cmd_rd != NULL)
639 free(req->vsr_cmd_rd);
640 if (req->vsr_cmd_wr != NULL)
641 free(req->vsr_cmd_wr);
642
643 free(req);
644 }
645
646 static struct pci_vtscsi_request *
pci_vtscsi_get_request(struct pci_vtscsi_req_queue * req_queue)647 pci_vtscsi_get_request(struct pci_vtscsi_req_queue *req_queue)
648 {
649 struct pci_vtscsi_request *req;
650
651 assert(!STAILQ_EMPTY(req_queue));
652
653 req = STAILQ_FIRST(req_queue);
654 STAILQ_REMOVE_HEAD(req_queue, vsr_link);
655
656 return (req);
657 }
658
/*
 * Append a request to a request queue.  The caller must hold the mutex
 * protecting the queue (vsq_rmtx or vsq_fmtx, depending on which queue).
 */
static void
pci_vtscsi_put_request(struct pci_vtscsi_req_queue *req_queue,
    struct pci_vtscsi_request *req)
{
	STAILQ_INSERT_TAIL(req_queue, req, vsr_link);
}
665
666 static void
pci_vtscsi_queue_request(struct pci_vtscsi_softc * sc,struct vqueue_info * vq)667 pci_vtscsi_queue_request(struct pci_vtscsi_softc *sc, struct vqueue_info *vq)
668 {
669 struct pci_vtscsi_queue *q = &sc->vss_queues[vq->vq_num - 2];
670 struct pci_vtscsi_request *req;
671 struct vi_req vireq;
672 int n;
673
674 pthread_mutex_lock(&q->vsq_fmtx);
675 req = pci_vtscsi_get_request(&q->vsq_free_requests);
676 assert(req != NULL);
677 pthread_mutex_unlock(&q->vsq_fmtx);
678
679 n = vq_getchain(vq, req->vsr_iov, VTSCSI_MAXSEG, &vireq);
680 assert(n >= 1 && n <= VTSCSI_MAXSEG);
681
682 req->vsr_idx = vireq.idx;
683 req->vsr_queue = q;
684 req->vsr_iov_in = &req->vsr_iov[0];
685 req->vsr_niov_in = vireq.readable;
686 req->vsr_iov_out = &req->vsr_iov[vireq.readable];
687 req->vsr_niov_out = vireq.writable;
688
689 /*
690 * Make sure we got at least enough space for the VirtIO-SCSI
691 * command headers. If not, return this request immediately.
692 */
693 if (check_iov_len(req->vsr_iov_out, req->vsr_niov_out,
694 VTSCSI_OUT_HEADER_LEN(q->vsq_sc)) == false) {
695 WPRINTF("ignoring request with insufficient output");
696 req->vsr_cmd_wr->response = VIRTIO_SCSI_S_FAILURE;
697 pci_vtscsi_return_request(q, req, 1);
698 return;
699 }
700
701 if (check_iov_len(req->vsr_iov_in, req->vsr_niov_in,
702 VTSCSI_IN_HEADER_LEN(q->vsq_sc)) == false) {
703 WPRINTF("ignoring request with incomplete header");
704 req->vsr_cmd_wr->response = VIRTIO_SCSI_S_FAILURE;
705 pci_vtscsi_return_request(q, req, 1);
706 return;
707 }
708
709 /*
710 * We have to split the iovec array into a header and data portion each
711 * for input and output.
712 *
713 * We need to start with the output section (at the end of iov) in case
714 * the iovec covering the final part of the output header needs to be
715 * split, in which case split_iov() will move all reamaining iovecs up
716 * by one to make room for a new iovec covering the first part of the
717 * output data portion.
718 */
719 req->vsr_data_iov_out = split_iov(req->vsr_iov_out, &req->vsr_niov_out,
720 VTSCSI_OUT_HEADER_LEN(q->vsq_sc), &req->vsr_data_niov_out);
721
722 /*
723 * Similarly, to not overwrite the first iovec of the output section,
724 * the 2nd call to split_iov() to split the input section must actually
725 * cover the entire iovec array (both input and the already split output
726 * sections).
727 */
728 req->vsr_niov_in += req->vsr_niov_out + req->vsr_data_niov_out;
729
730 req->vsr_data_iov_in = split_iov(req->vsr_iov_in, &req->vsr_niov_in,
731 VTSCSI_IN_HEADER_LEN(q->vsq_sc), &req->vsr_data_niov_in);
732
733 /*
734 * And of course we now have to adjust data_niov_in accordingly.
735 */
736 req->vsr_data_niov_in -= req->vsr_niov_out + req->vsr_data_niov_out;
737
738 /*
739 * iov_to_buf() realloc()s the buffer given as 3rd argument to the
740 * total size of all iovecs it will be copying. Since we've just
741 * truncated it in split_iov(), we know that the size will be
742 * VTSCSI_IN_HEADER_LEN(q->vsq_sc).
743 *
744 * Since we pre-allocated req->vsr_cmd_rd to this size, the realloc()
745 * should never fail.
746 *
747 * This will have to change if we begin allowing config space writes
748 * to change sense size.
749 */
750 assert(iov_to_buf(req->vsr_iov_in, req->vsr_niov_in,
751 (void **)&req->vsr_cmd_rd) == VTSCSI_IN_HEADER_LEN(q->vsq_sc));
752
753 /* Make sure this request addresses a valid LUN. */
754 if (pci_vtscsi_check_lun(req->vsr_cmd_rd->lun) == false) {
755 DPRINTF("I/O request to invalid LUN "
756 "%.2hhx%.2hhx-%.2hhx%.2hhx-%.2hhx%.2hhx-%.2hhx%.2hhx",
757 req->vsr_cmd_rd->lun[0], req->vsr_cmd_rd->lun[1],
758 req->vsr_cmd_rd->lun[2], req->vsr_cmd_rd->lun[3],
759 req->vsr_cmd_rd->lun[4], req->vsr_cmd_rd->lun[5],
760 req->vsr_cmd_rd->lun[6], req->vsr_cmd_rd->lun[7]);
761 req->vsr_cmd_wr->response = VIRTIO_SCSI_S_BAD_TARGET;
762 pci_vtscsi_return_request(q, req, 1);
763 return;
764 }
765
766 pthread_mutex_lock(&q->vsq_rmtx);
767 pci_vtscsi_put_request(&q->vsq_requests, req);
768 pthread_cond_signal(&q->vsq_cv);
769 pthread_mutex_unlock(&q->vsq_rmtx);
770
771 DPRINTF("request <idx=%d> enqueued", vireq.idx);
772 }
773
/*
 * Complete an I/O request: write the response header into the guest
 * buffers, scrub the request for reuse, park it back on the free list, and
 * release the descriptor chain.  iolen is the number of data bytes already
 * transferred to the guest; the response header length is added here.
 */
static void
pci_vtscsi_return_request(struct pci_vtscsi_queue *q,
    struct pci_vtscsi_request *req, int iolen)
{
	void *cmd_rd = req->vsr_cmd_rd;
	void *cmd_wr = req->vsr_cmd_wr;
	void *ctl_io = req->vsr_ctl_io;
	int idx = req->vsr_idx;

	DPRINTF("request <idx=%d> completed, response %d", idx,
	    req->vsr_cmd_wr->response);

	/* Copy the response header into the guest-writable iovecs. */
	iolen += buf_to_iov(cmd_wr, VTSCSI_OUT_HEADER_LEN(q->vsq_sc),
	    req->vsr_iov_out, req->vsr_niov_out);

	/*
	 * Re-initialize the request for reuse, preserving the pointers to
	 * its pre-allocated buffers across the memset.
	 */
	ctl_scsi_zero_io(req->vsr_ctl_io);

	memset(cmd_rd, 0, VTSCSI_IN_HEADER_LEN(q->vsq_sc));
	memset(cmd_wr, 0, VTSCSI_OUT_HEADER_LEN(q->vsq_sc));
	memset(req, 0, sizeof(struct pci_vtscsi_request));

	req->vsr_cmd_rd = cmd_rd;
	req->vsr_cmd_wr = cmd_wr;
	req->vsr_ctl_io = ctl_io;

	/*
	 * Return the request to the free list before releasing the chain so
	 * that a free request is available for every chain the guest can
	 * post (pci_vtscsi_queue_request() asserts this invariant).
	 */
	pthread_mutex_lock(&q->vsq_fmtx);
	pci_vtscsi_put_request(&q->vsq_free_requests, req);
	pthread_mutex_unlock(&q->vsq_fmtx);

	pthread_mutex_lock(&q->vsq_qmtx);
	vq_relchain(q->vsq_vq, idx, iolen);
	vq_endchains(q->vsq_vq, 0);
	pthread_mutex_unlock(&q->vsq_qmtx);
}
808
/*
 * Translate an I/O request into a CTL SCSI I/O, submit it synchronously
 * through the CTL_IO ioctl, and fill in the response header.  Returns the
 * number of data bytes transferred (ext_data_filled).
 *
 * NOTE(review): the return value is used as the device-written byte count
 * for vq_relchain() even for data-out (guest-to-device) transfers — confirm
 * this is intended.
 */
static int
pci_vtscsi_request_handle(struct pci_vtscsi_softc *sc,
    struct pci_vtscsi_request *req)
{
	union ctl_io *io = req->vsr_ctl_io;
	void *ext_data_ptr = NULL;
	uint32_t ext_data_len = 0, ext_sg_entries = 0;
	int err, nxferred;

	io->io_hdr.nexus.initid = sc->vss_iid;
	io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(req->vsr_cmd_rd->lun);

	io->io_hdr.io_type = CTL_IO_SCSI;

	/*
	 * Hand the data payload to CTL as an external scatter/gather list.
	 * Guest-readable iovecs carry data out of the guest, guest-writable
	 * iovecs carry data into the guest.
	 */
	if (req->vsr_data_niov_in > 0) {
		ext_data_ptr = (void *)req->vsr_data_iov_in;
		ext_sg_entries = req->vsr_data_niov_in;
		ext_data_len = count_iov(req->vsr_data_iov_in,
		    req->vsr_data_niov_in);
		io->io_hdr.flags |= CTL_FLAG_DATA_OUT;
	} else if (req->vsr_data_niov_out > 0) {
		ext_data_ptr = (void *)req->vsr_data_iov_out;
		ext_sg_entries = req->vsr_data_niov_out;
		ext_data_len = count_iov(req->vsr_data_iov_out,
		    req->vsr_data_niov_out);
		io->io_hdr.flags |= CTL_FLAG_DATA_IN;
	}

	io->scsiio.sense_len = sc->vss_config.sense_size;
	io->scsiio.tag_num = req->vsr_cmd_rd->id;
	io->io_hdr.flags |= CTL_FLAG_USER_TAG;
	/* Map the virtio task attribute to the corresponding CTL tag type. */
	switch (req->vsr_cmd_rd->task_attr) {
	case VIRTIO_SCSI_S_ORDERED:
		io->scsiio.tag_type = CTL_TAG_ORDERED;
		break;
	case VIRTIO_SCSI_S_HEAD:
		io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
		break;
	case VIRTIO_SCSI_S_ACA:
		io->scsiio.tag_type = CTL_TAG_ACA;
		break;
	case VIRTIO_SCSI_S_SIMPLE:
	default:
		io->scsiio.tag_type = CTL_TAG_SIMPLE;
		break;
	}
	io->scsiio.ext_sg_entries = ext_sg_entries;
	io->scsiio.ext_data_ptr = ext_data_ptr;
	io->scsiio.ext_data_len = ext_data_len;
	io->scsiio.ext_data_filled = 0;
	io->scsiio.cdb_len = sc->vss_config.cdb_size;
	memcpy(io->scsiio.cdb, req->vsr_cmd_rd->cdb, sc->vss_config.cdb_size);

	if (pci_vtscsi_debug) {
		struct sbuf *sb = sbuf_new_auto();
		ctl_io_sbuf(io, sb);
		sbuf_finish(sb);
		DPRINTF("%s", sbuf_data(sb));
		sbuf_delete(sb);
	}

	err = ioctl(sc->vss_ctl_fd, CTL_IO, io);
	if (err != 0) {
		WPRINTF("CTL_IO: err=%d (%s)", errno, strerror(errno));
		req->vsr_cmd_wr->response = VIRTIO_SCSI_S_FAILURE;
	} else {
		/* Never copy more sense data than the guest buffer holds. */
		req->vsr_cmd_wr->sense_len =
		    MIN(io->scsiio.sense_len, sc->vss_config.sense_size);
		req->vsr_cmd_wr->residual = ext_data_len -
		    io->scsiio.ext_data_filled;
		req->vsr_cmd_wr->status = io->scsiio.scsi_status;
		req->vsr_cmd_wr->response = VIRTIO_SCSI_S_OK;
		memcpy(&req->vsr_cmd_wr->sense, &io->scsiio.sense_data,
		    req->vsr_cmd_wr->sense_len);
	}

	nxferred = io->scsiio.ext_data_filled;
	return (nxferred);
}
888
/*
 * Control queue handler.  Each descriptor chain carries one control
 * request; it is flattened into a linear buffer, handled, and the buffer is
 * copied back so the guest sees the device-written response fields.
 */
static void
pci_vtscsi_controlq_notify(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtscsi_softc *sc;
	struct iovec iov[VTSCSI_MAXSEG];
	struct vi_req req;
	void *buf = NULL;
	size_t bufsize;
	int n;

	sc = vsc;

	while (vq_has_descs(vq)) {
		n = vq_getchain(vq, iov, VTSCSI_MAXSEG, &req);
		assert(n >= 1 && n <= VTSCSI_MAXSEG);

		/* iov_to_buf() realloc()s buf, so it is reused across loops. */
		bufsize = iov_to_buf(iov, n, &buf);
		pci_vtscsi_control_handle(sc, buf, bufsize);
		buf_to_iov((uint8_t *)buf, bufsize, iov, n);

		/*
		 * Release this chain and handle more
		 */
		vq_relchain(vq, req.idx, bufsize);
	}
	vq_endchains(vq, 1); /* Generate interrupt if appropriate. */
	free(buf);
}
917
/*
 * Event queue handler.  The guest posts buffers for the device to report
 * events in; since no events are currently reported, simply stop the guest
 * from kicking us about new event buffers.
 */
static void
pci_vtscsi_eventq_notify(void *vsc __unused, struct vqueue_info *vq)
{
	vq_kick_disable(vq);
}
923
/*
 * I/O request queue handler: hand every available descriptor chain to
 * pci_vtscsi_queue_request() for dispatch to the worker threads.
 */
static void
pci_vtscsi_requestq_notify(void *vsc, struct vqueue_info *vq)
{
	while (vq_has_descs(vq)) {
		pci_vtscsi_queue_request(vsc, vq);
	}
}
931
/*
 * Initialize the state of I/O request queue number num: pre-allocate
 * VTSCSI_RINGSZ requests onto the free list and start VTSCSI_THR_PER_Q
 * worker threads.  Returns 0 on success, -1 on failure (with partial
 * allocations released via pci_vtscsi_destroy_queue()).
 */
static int
pci_vtscsi_init_queue(struct pci_vtscsi_softc *sc,
    struct pci_vtscsi_queue *queue, int num)
{
	struct pci_vtscsi_worker *workers;
	char tname[MAXCOMLEN + 1];
	int i;

	queue->vsq_sc = sc;
	/* Virtqueues 0 and 1 are the control and event queues. */
	queue->vsq_vq = &sc->vss_vq[num + 2];

	pthread_mutex_init(&queue->vsq_rmtx, NULL);
	pthread_mutex_init(&queue->vsq_fmtx, NULL);
	pthread_mutex_init(&queue->vsq_qmtx, NULL);
	pthread_cond_init(&queue->vsq_cv, NULL);
	STAILQ_INIT(&queue->vsq_requests);
	STAILQ_INIT(&queue->vsq_free_requests);
	LIST_INIT(&queue->vsq_workers);

	/* One pre-allocated request per ring slot. */
	for (i = 0; i < VTSCSI_RINGSZ; i++) {
		struct pci_vtscsi_request *req;

		req = pci_vtscsi_alloc_request(sc);
		if (req == NULL)
			goto fail;

		pci_vtscsi_put_request(&queue->vsq_free_requests, req);
	}

	workers = calloc(VTSCSI_THR_PER_Q, sizeof(struct pci_vtscsi_worker));
	if (workers == NULL)
		goto fail;

	for (i = 0; i < VTSCSI_THR_PER_Q; i++) {
		workers[i].vsw_queue = queue;

		/* NOTE(review): pthread_create() return value is unchecked. */
		pthread_create(&workers[i].vsw_thread, NULL, &pci_vtscsi_proc,
		    (void *)&workers[i]);

		snprintf(tname, sizeof(tname), "vtscsi:%d-%d", num, i);
		pthread_set_name_np(workers[i].vsw_thread, tname);
		LIST_INSERT_HEAD(&queue->vsq_workers, &workers[i], vsw_link);
	}

	return (0);

fail:
	pci_vtscsi_destroy_queue(queue);

	return (-1);

}
984
985 static void
pci_vtscsi_destroy_queue(struct pci_vtscsi_queue * queue)986 pci_vtscsi_destroy_queue(struct pci_vtscsi_queue *queue)
987 {
988 if (queue->vsq_sc == NULL)
989 return;
990
991 for (int i = VTSCSI_RINGSZ; i > 0; i--) {
992 struct pci_vtscsi_request *req;
993
994 if (STAILQ_EMPTY(&queue->vsq_free_requests))
995 break;
996
997 req = pci_vtscsi_get_request(&queue->vsq_free_requests);
998 pci_vtscsi_free_request(req);
999 }
1000
1001 pthread_cond_destroy(&queue->vsq_cv);
1002 pthread_mutex_destroy(&queue->vsq_qmtx);
1003 pthread_mutex_destroy(&queue->vsq_fmtx);
1004 pthread_mutex_destroy(&queue->vsq_rmtx);
1005 }
1006
1007 static int
pci_vtscsi_legacy_config(nvlist_t * nvl,const char * opts)1008 pci_vtscsi_legacy_config(nvlist_t *nvl, const char *opts)
1009 {
1010 char *cp, *devname;
1011
1012 if (opts == NULL)
1013 return (0);
1014
1015 cp = strchr(opts, ',');
1016 if (cp == NULL) {
1017 set_config_value_node(nvl, "dev", opts);
1018 return (0);
1019 }
1020 devname = strndup(opts, cp - opts);
1021 set_config_value_node(nvl, "dev", devname);
1022 free(devname);
1023 return (pci_parse_legacy_config(nvl, cp + 1));
1024 }
1025
1026 static int
pci_vtscsi_init(struct pci_devinst * pi,nvlist_t * nvl)1027 pci_vtscsi_init(struct pci_devinst *pi, nvlist_t *nvl)
1028 {
1029 struct pci_vtscsi_softc *sc;
1030 const char *devname, *value;
1031 int err;
1032 int i;
1033
1034 sc = calloc(1, sizeof(struct pci_vtscsi_softc));
1035 if (sc == NULL)
1036 return (-1);
1037
1038 value = get_config_value_node(nvl, "iid");
1039 if (value != NULL)
1040 sc->vss_iid = strtoul(value, NULL, 10);
1041
1042 value = get_config_value_node(nvl, "bootindex");
1043 if (value != NULL) {
1044 if (pci_emul_add_boot_device(pi, atoi(value))) {
1045 EPRINTLN("Invalid bootindex %d", atoi(value));
1046 goto fail;
1047 }
1048 }
1049
1050 devname = get_config_value_node(nvl, "dev");
1051 if (devname == NULL)
1052 devname = "/dev/cam/ctl";
1053 sc->vss_ctl_fd = open(devname, O_RDWR);
1054 if (sc->vss_ctl_fd < 0) {
1055 WPRINTF("cannot open %s: %s", devname, strerror(errno));
1056 goto fail;
1057 }
1058
1059 pthread_mutex_init(&sc->vss_mtx, NULL);
1060
1061 vi_softc_linkup(&sc->vss_vs, &vtscsi_vi_consts, sc, pi, sc->vss_vq);
1062 sc->vss_vs.vs_mtx = &sc->vss_mtx;
1063
1064 /*
1065 * Perform a "reset" before we set up our queues.
1066 *
1067 * This will write the default config into vss_config, which is used
1068 * by the rest of the driver to get the request header size. Note that
1069 * if we ever allow the guest to override sense size through config
1070 * space writes, pre-allocation of I/O requests will have to change
1071 * accordingly.
1072 */
1073 pthread_mutex_lock(&sc->vss_mtx);
1074 pci_vtscsi_reset(sc);
1075 pthread_mutex_unlock(&sc->vss_mtx);
1076
1077 /* controlq */
1078 sc->vss_vq[0].vq_qsize = VTSCSI_RINGSZ;
1079 sc->vss_vq[0].vq_notify = pci_vtscsi_controlq_notify;
1080
1081 /* eventq */
1082 sc->vss_vq[1].vq_qsize = VTSCSI_RINGSZ;
1083 sc->vss_vq[1].vq_notify = pci_vtscsi_eventq_notify;
1084
1085 /* request queues */
1086 for (i = 2; i < VTSCSI_MAXQ; i++) {
1087 sc->vss_vq[i].vq_qsize = VTSCSI_RINGSZ;
1088 sc->vss_vq[i].vq_notify = pci_vtscsi_requestq_notify;
1089
1090 err = pci_vtscsi_init_queue(sc, &sc->vss_queues[i - 2], i - 2);
1091 if (err != 0)
1092 goto fail;
1093 }
1094
1095 /* initialize config space */
1096 pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_SCSI);
1097 pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
1098 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
1099 pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_SCSI);
1100 pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);
1101
1102 if (vi_intr_init(&sc->vss_vs, 1, fbsdrun_virtio_msix()))
1103 goto fail;
1104
1105 vi_set_io_bar(&sc->vss_vs, 0);
1106
1107 return (0);
1108
1109 fail:
1110 for (i = 2; i < VTSCSI_MAXQ; i++)
1111 pci_vtscsi_destroy_queue(&sc->vss_queues[i - 2]);
1112
1113 if (sc->vss_ctl_fd > 0)
1114 close(sc->vss_ctl_fd);
1115
1116 free(sc);
1117 return (-1);
1118 }
1119
1120
/*
 * Device emulation descriptor for "virtio-scsi", registered with the
 * PCI emulation linker set below.  BAR accesses are routed through the
 * generic virtio read/write handlers.
 */
static const struct pci_devemu pci_de_vscsi = {
	.pe_emu = "virtio-scsi",
	.pe_init = pci_vtscsi_init,
	.pe_legacy_config = pci_vtscsi_legacy_config,
	.pe_barwrite = vi_pci_write,
	.pe_barread = vi_pci_read
};
PCI_EMUL_SET(pci_de_vscsi);
1129