1 /*-
2 * Copyright (c) 2017 Juniper Networks, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 *
25 */
/*
 * The Virtio 9P transport driver. This file contains all functions related
 * to the virtqueue infrastructure: creating the virtqueue, host
 * interactions, and interrupt handling.
 */
31
32 #include <sys/param.h>
33 #include <sys/errno.h>
34 #include <sys/module.h>
35 #include <sys/sglist.h>
36 #include <sys/queue.h>
37 #include <sys/bus.h>
38 #include <sys/kthread.h>
39 #include <sys/condvar.h>
40 #include <sys/sysctl.h>
41
42 #include <machine/bus.h>
43
44 #include <fs/p9fs/p9_client.h>
45 #include <fs/p9fs/p9_debug.h>
46 #include <fs/p9fs/p9_protocol.h>
47 #include <fs/p9fs/p9_transport.h>
48
49 #include <dev/virtio/virtio.h>
50 #include <dev/virtio/virtqueue.h>
51 #include <dev/virtio/virtio_ring.h>
52 #include <dev/virtio/p9fs/virtio_p9fs.h>
53
/* Per-channel mutex helpers; the mutex serializes virtqueue access. */
#define VT9P_MTX(_sc) (&(_sc)->vt9p_mtx)
#define VT9P_LOCK(_sc) mtx_lock(VT9P_MTX(_sc))
#define VT9P_UNLOCK(_sc) mtx_unlock(VT9P_MTX(_sc))
#define VT9P_LOCK_INIT(_sc) mtx_init(VT9P_MTX(_sc), \
    "VIRTIO 9P CHAN lock", NULL, MTX_DEF)
#define VT9P_LOCK_DESTROY(_sc) mtx_destroy(VT9P_MTX(_sc))
/* Upper bound on scatter/gather segments per request (tc + rc buffers). */
#define MAX_SUPPORTED_SGS 20
static MALLOC_DEFINE(M_P9FS_MNTTAG, "p9fs_mount_tag", "P9fs Mounttag");
62
/*
 * Per-device softc: one instance per virtio 9P device, backing at most
 * one mounted p9fs client at a time (tracked by 'busy').
 */
struct vt9p_softc {
	device_t vt9p_dev;		/* virtio device handle */
	struct mtx vt9p_mtx;		/* protects vq/sglist, see VT9P_LOCK */
	struct sglist *vt9p_sglist;	/* scratch sg list, rebuilt per request */
	struct cv submit_cv;		/* waiters for ring space (ENOSPC) */
	bool busy;			/* channel claimed by a client */
	struct virtqueue *vt9p_vq;	/* single request virtqueue */
	int max_nsegs;			/* sg capacity (MAX_SUPPORTED_SGS) */
	uint16_t mount_tag_len;		/* tag length incl. NUL terminator */
	char *mount_tag;		/* host-provided tag, NUL-terminated */
	STAILQ_ENTRY(vt9p_softc) chan_next;	/* global_chan_list linkage */
};
75
/* Global channel list; each channel corresponds to one mount point. */
static STAILQ_HEAD( ,vt9p_softc) global_chan_list =
    STAILQ_HEAD_INITIALIZER(global_chan_list);
struct mtx global_chan_list_mtx;
MTX_SYSINIT(global_chan_list_mtx, &global_chan_list_mtx, "9pglobal", MTX_DEF);

/* Feature names shown during virtio feature negotiation. */
static struct virtio_feature_desc virtio_9p_feature_desc[] = {
	{ VIRTIO_9PNET_F_MOUNT_TAG, "9PMountTag" },
	{ 0, NULL }
};

VIRTIO_SIMPLE_PNPINFO(virtio_p9fs, VIRTIO_ID_9P, "VirtIO 9P Transport");
88
89 /* We don't currently allow canceling of virtio requests */
static int
vt9p_cancel(void *handle, struct p9_req_t *req)
{
	/* Always report failure: in-flight virtio requests cannot be pulled back. */
	return (1);
}
95
/* Root of the vfs.9p sysctl tree. */
SYSCTL_NODE(_vfs, OID_AUTO, 9p, CTLFLAG_RW, 0, "9P File System Protocol");

/*
 * Maximum number of seconds the vt9p_request thread sleeps waiting for an
 * ack from the host, before giving up with EIO (see vt9p_req_wait()).
 */
static unsigned int vt9p_ackmaxidle = 120;
SYSCTL_UINT(_vfs_9p, OID_AUTO, ackmaxidle, CTLFLAG_RW, &vt9p_ackmaxidle, 0,
    "Maximum time request thread waits for ack from host");
105
106 /*
107 * Wait for completion of a p9 request.
108 *
109 * This routine will sleep and release the chan mtx during the period.
110 * chan mtx will be acquired again upon return.
111 */
112 static int
vt9p_req_wait(struct vt9p_softc * chan,struct p9_req_t * req)113 vt9p_req_wait(struct vt9p_softc *chan, struct p9_req_t *req)
114 {
115 KASSERT(req->tc->tag != req->rc->tag,
116 ("%s: request %p already completed", __func__, req));
117
118 if (msleep(req, VT9P_MTX(chan), 0, "chan lock", vt9p_ackmaxidle * hz)) {
119 /*
120 * Waited for 120s. No response from host.
121 * Can't wait for ever..
122 */
123 P9_DEBUG(ERROR, "Timeout after waiting %u seconds"
124 "for an ack from host\n", vt9p_ackmaxidle);
125 return (EIO);
126 }
127 KASSERT(req->tc->tag == req->rc->tag,
128 ("%s spurious event on request %p", __func__, req));
129 return (0);
130 }
131
132 /*
133 * Request handler. This is called for every request submitted to the host
134 * It basically maps the tc/rc buffers to sg lists and submits the requests
135 * into the virtqueue. Since we have implemented a synchronous version, the
136 * submission thread sleeps until the ack in the interrupt wakes it up. Once
137 * it wakes up, it returns back to the P9fs layer. The rc buffer is then
138 * processed and completed to its upper layers.
139 */
140 static int
vt9p_request(void * handle,struct p9_req_t * req)141 vt9p_request(void *handle, struct p9_req_t *req)
142 {
143 int error;
144 struct vt9p_softc *chan;
145 int readable, writable;
146 struct sglist *sg;
147 struct virtqueue *vq;
148
149 chan = handle;
150 sg = chan->vt9p_sglist;
151 vq = chan->vt9p_vq;
152
153 P9_DEBUG(TRANS, "%s: req=%p\n", __func__, req);
154
155 /* Grab the channel lock*/
156 VT9P_LOCK(chan);
157 req_retry:
158 sglist_reset(sg);
159 /* Handle out VirtIO ring buffers */
160 error = sglist_append(sg, req->tc->sdata, req->tc->size);
161 if (error != 0) {
162 P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__);
163 VT9P_UNLOCK(chan);
164 return (error);
165 }
166 readable = sg->sg_nseg;
167
168 error = sglist_append(sg, req->rc->sdata, req->rc->capacity);
169 if (error != 0) {
170 P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__);
171 VT9P_UNLOCK(chan);
172 return (error);
173 }
174 writable = sg->sg_nseg - readable;
175
176 error = virtqueue_enqueue(vq, req, sg, readable, writable);
177 if (error != 0) {
178 if (error == ENOSPC) {
179 /*
180 * Condvar for the submit queue. Unlock the chan
181 * since wakeup needs one.
182 */
183 cv_wait(&chan->submit_cv, VT9P_MTX(chan));
184 P9_DEBUG(TRANS, "%s: retry virtio request\n", __func__);
185 goto req_retry;
186 } else {
187 P9_DEBUG(ERROR, "%s: virtio enuqueue failed \n", __func__);
188 VT9P_UNLOCK(chan);
189 return (EIO);
190 }
191 }
192
193 /* We have to notify */
194 virtqueue_notify(vq);
195
196 error = vt9p_req_wait(chan, req);
197 if (error != 0) {
198 VT9P_UNLOCK(chan);
199 return (error);
200 }
201
202 VT9P_UNLOCK(chan);
203
204 P9_DEBUG(TRANS, "%s: virtio request kicked\n", __func__);
205
206 return (0);
207 }
208
209 /*
210 * Completion of the request from the virtqueue. This interrupt handler is
211 * setup at initialization and is called for every completing request. It
212 * just wakes up the sleeping submission requests.
213 */
static void
vt9p_intr_complete(void *xsc)
{
	struct vt9p_softc *chan;
	struct virtqueue *vq;
	struct p9_req_t *curreq;

	chan = (struct vt9p_softc *)xsc;
	vq = chan->vt9p_vq;

	P9_DEBUG(TRANS, "%s: completing\n", __func__);

	VT9P_LOCK(chan);
again:
	/*
	 * Drain all completed requests.  Copying tc->tag into rc->tag
	 * marks the request as completed -- vt9p_req_wait() asserts on
	 * tag equality -- and must happen before waking the submitter.
	 */
	while ((curreq = virtqueue_dequeue(vq, NULL)) != NULL) {
		curreq->rc->tag = curreq->tc->tag;
		wakeup_one(curreq);
	}
	/*
	 * Re-enable interrupts.  A non-zero return means more completions
	 * arrived while interrupts were off; disable again and re-drain so
	 * none are lost.
	 */
	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		goto again;
	}
	/* Ring descriptors were freed; wake one submitter blocked on ENOSPC. */
	cv_signal(&chan->submit_cv);
	VT9P_UNLOCK(chan);
}
239
240 /*
241 * Allocation of the virtqueue with interrupt complete routines.
242 */
243 static int
vt9p_alloc_virtqueue(struct vt9p_softc * sc)244 vt9p_alloc_virtqueue(struct vt9p_softc *sc)
245 {
246 struct vq_alloc_info vq_info;
247 device_t dev;
248
249 dev = sc->vt9p_dev;
250
251 VQ_ALLOC_INFO_INIT(&vq_info, sc->max_nsegs,
252 vt9p_intr_complete, sc, &sc->vt9p_vq,
253 "%s request", device_get_nameunit(dev));
254
255 return (virtio_alloc_virtqueues(dev, 1, &vq_info));
256 }
257
258 /* Probe for existence of 9P virtio channels */
static int
vt9p_probe(device_t dev)
{
	/* Match against the virtio 9P device ID declared via PNPINFO. */
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_p9fs));
}
264
static void
vt9p_stop(struct vt9p_softc *sc)
{

	/* Device specific stops: quiesce the queue first, then the device. */
	virtqueue_disable_intr(sc->vt9p_vq);
	virtio_stop(sc->vt9p_dev);
}
273
274 /* Detach the 9P virtio PCI device */
275 static int
vt9p_detach(device_t dev)276 vt9p_detach(device_t dev)
277 {
278 struct vt9p_softc *sc;
279
280 sc = device_get_softc(dev);
281 VT9P_LOCK(sc);
282 vt9p_stop(sc);
283 VT9P_UNLOCK(sc);
284
285 if (sc->vt9p_sglist) {
286 sglist_free(sc->vt9p_sglist);
287 sc->vt9p_sglist = NULL;
288 }
289 if (sc->mount_tag) {
290 free(sc->mount_tag, M_P9FS_MNTTAG);
291 sc->mount_tag = NULL;
292 }
293 mtx_lock(&global_chan_list_mtx);
294 STAILQ_REMOVE(&global_chan_list, sc, vt9p_softc, chan_next);
295 mtx_unlock(&global_chan_list_mtx);
296
297 VT9P_LOCK_DESTROY(sc);
298 cv_destroy(&sc->submit_cv);
299
300 return (0);
301 }
302
303 /* Attach the 9P virtio PCI device */
static int
vt9p_attach(device_t dev)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct vt9p_softc *chan;
	char *mount_tag;
	int error;
	uint16_t mount_tag_len;

	chan = device_get_softc(dev);
	chan->vt9p_dev = dev;

	/* Init the channel lock. */
	VT9P_LOCK_INIT(chan);
	/* Initialize the condition variable used for ring-full waits. */
	cv_init(&chan->submit_cv, "Conditional variable for submit queue" );
	chan->max_nsegs = MAX_SUPPORTED_SGS;
	chan->vt9p_sglist = sglist_alloc(chan->max_nsegs, M_WAITOK);

	/* Negotiate the features from the host */
	virtio_set_feature_desc(dev, virtio_9p_feature_desc);
	virtio_negotiate_features(dev, VIRTIO_9PNET_F_MOUNT_TAG);

	/*
	 * If the mount tag feature is supported, read the mount tag from
	 * device config.  The tag is mandatory here: it is the only key
	 * vt9p_create() can use to match this channel to a mount request.
	 */
	if (virtio_with_feature(dev, VIRTIO_9PNET_F_MOUNT_TAG))
		mount_tag_len = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_9pnet_config, mount_tag_len));
	else {
		error = EINVAL;
		P9_DEBUG(ERROR, "%s: Mount tag feature not supported by host\n", __func__);
		goto out;
	}
	/* One extra byte plus M_ZERO keeps the tag NUL-terminated. */
	mount_tag = malloc(mount_tag_len + 1, M_P9FS_MNTTAG,
	    M_WAITOK | M_ZERO);

	virtio_read_device_config_array(dev,
	    offsetof(struct virtio_9pnet_config, mount_tag),
	    mount_tag, 1, mount_tag_len);

	device_printf(dev, "Mount tag: %s\n", mount_tag);

	/* Account for the NUL terminator in the stored length. */
	mount_tag_len++;
	chan->mount_tag_len = mount_tag_len;
	chan->mount_tag = mount_tag;

	/* Expose the tag read-only so admins can match devices to mounts. */
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "p9fs_mount_tag",
	    CTLFLAG_RD, chan->mount_tag, 0, "Mount tag");

	/* We expect one virtqueue, for requests. */
	error = vt9p_alloc_virtqueue(chan);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: Allocating the virtqueue failed \n", __func__);
		goto out;
	}
	error = virtio_setup_intr(dev, INTR_TYPE_MISC|INTR_MPSAFE);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: Cannot setup virtqueue interrupt\n", __func__);
		goto out;
	}
	error = virtqueue_enable_intr(chan->vt9p_vq);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: Cannot enable virtqueue interrupt\n", __func__);
		goto out;
	}

	/*
	 * Publish the channel last, once fully initialized, so
	 * vt9p_create() can never find a half-constructed softc.
	 */
	mtx_lock(&global_chan_list_mtx);
	/* Insert the channel in global channel list */
	STAILQ_INSERT_HEAD(&global_chan_list, chan, chan_next);
	mtx_unlock(&global_chan_list_mtx);

	return (0);
out:
	/* Something went wrong, detach the device */
	vt9p_detach(dev);
	return (error);
}
386
387 /*
388 * Allocate a new virtio channel. This sets up a transport channel
389 * for 9P communication
390 */
391 static int
vt9p_create(const char * mount_tag,void ** handlep)392 vt9p_create(const char *mount_tag, void **handlep)
393 {
394 struct vt9p_softc *sc, *chan;
395
396 chan = NULL;
397
398 /*
399 * Find out the corresponding channel for a client from global list
400 * of channels based on mount tag and attach it to client
401 */
402 mtx_lock(&global_chan_list_mtx);
403 STAILQ_FOREACH(sc, &global_chan_list, chan_next) {
404 if (!strcmp(sc->mount_tag, mount_tag)) {
405 chan = sc;
406 break;
407 }
408 }
409 mtx_unlock(&global_chan_list_mtx);
410
411 /*
412 * If chan is already attached to a client then it cannot be used for
413 * another client.
414 */
415 if (chan && chan->busy) {
416 //p9_debug(TRANS, "Channel busy: used by clnt=%p\n", chan->client);
417 return (EBUSY);
418 }
419
420 /* If we dont have one, for now bail out.*/
421 if (chan) {
422 *handlep = (void *)chan;
423 chan->busy = true;
424 } else {
425 P9_DEBUG(TRANS, "%s: No Global channel with mount_tag=%s\n",
426 __func__, mount_tag);
427 return (EINVAL);
428 }
429
430 return (0);
431 }
432
static void
vt9p_close(void *handle)
{
	struct vt9p_softc *chan = handle;

	/*
	 * Release the channel for reuse by a future mount.
	 * NOTE(review): cleared without holding global_chan_list_mtx,
	 * while vt9p_create() reads this flag -- confirm the intended
	 * synchronization.
	 */
	chan->busy = false;
}
440
/* Transport operations handed to the p9fs client layer at module load. */
static struct p9_trans_module vt9p_trans = {
	.name = "virtio",
	.create = vt9p_create,
	.close = vt9p_close,
	.request = vt9p_request,
	.cancel = vt9p_cancel,
};
448
/* Newbus entry points for the virtio_p9fs driver. */
static device_method_t vt9p_mthds[] = {
	/* Device methods. */
	DEVMETHOD(device_probe, vt9p_probe),
	DEVMETHOD(device_attach, vt9p_attach),
	DEVMETHOD(device_detach, vt9p_detach),
	DEVMETHOD_END
};

/* Driver definition: name, methods, and per-device softc size. */
static driver_t vt9p_drv = {
	"virtio_p9fs",
	vt9p_mthds,
	sizeof(struct vt9p_softc)
};
462
static int
vt9p_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
		/* Set up p9fs buffer zones, then register this transport. */
		p9_init_zones();
		p9_register_trans(&vt9p_trans);
		break;
	case MOD_UNLOAD:
		/*
		 * NOTE(review): only the zones are torn down here; the
		 * transport registered at MOD_LOAD is never unregistered.
		 * Confirm whether p9_transport.h provides a counterpart.
		 */
		p9_destroy_zones();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
486
VIRTIO_DRIVER_MODULE(virtio_p9fs, vt9p_drv, vt9p_modevent, NULL);
MODULE_VERSION(virtio_p9fs, 1);
/* Needs both the virtio bus and the p9fs client layer to be present. */
MODULE_DEPEND(virtio_p9fs, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_p9fs, p9fs, 1, 1, 1);
491