// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fd transport layer. Includes deprecated socket layer.
 *
 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#include <linux/syscalls.h> /* killme */

#define P9_PORT 564
#define MAX_SOCK_BUF (1024*1024)
#define MAXPOLLWADDR 2

static struct p9_trans_module p9_tcp_trans;
static struct p9_trans_module p9_fd_trans;

/**
 * struct p9_fd_opts - per-transport options
 * @rfd: file descriptor for reading (trans=fd)
 * @wfd: file descriptor for writing (trans=fd)
 * @port: port to connect to (trans=tcp)
 * @privport: port is privileged
 */

struct p9_fd_opts {
	int rfd;
	int wfd;
	u16 port;
	bool privport;
};

/*
 * Option Parsing (code inspired by NFS code)
 *  - a little lazy - parse all fd-transport options
 */

enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
	/* Options that take no arguments */
	Opt_privport,
};

static const match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_rfdno, "rfdno=%u"},
	{Opt_wfdno, "wfdno=%u"},
	{Opt_privport, "privport"},
	{Opt_err, NULL},
};

enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};

struct p9_poll_wait {
	struct p9_conn *conn;
	wait_queue_entry_t wait;
	wait_queue_head_t *wait_addr;
};

/**
 * struct p9_conn - fd mux connection state information
 * @mux_list: list link for mux to manage multiple connections (?)
 * @client: reference to client instance for this connection
 * @err: error state
 * @req_lock: lock protecting req_list and request statuses
 * @req_list: accounting for requests which have been sent
 * @unsent_req_list: accounting for requests that haven't been sent
 * @rreq: read request
 * @wreq: write request
 * @tmp_buf: temporary buffer to read in header
 * @rc: temporary fcall for reading current frame
 * @wpos: write position for current frame
 * @wsize: amount of data to write for current frame
 * @wbuf: current write buffer
 * @poll_pending_link: pending links to be polled per conn
 * @poll_wait: array of wait_q's for various worker threads
 * @pt: poll state
 * @rq: current read work
 * @wq: current write work
 * @wsched: work scheduling state flags (Rworksched, Rpending,
 *          Wworksched, Wpending)
 *
 */

struct p9_conn {
	struct list_head mux_list;
	struct p9_client *client;
	int err;
	spinlock_t req_lock;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct p9_req_t *rreq;
	struct p9_req_t *wreq;
	char tmp_buf[P9_HDRSZ];
	struct p9_fcall rc;
	int wpos;
	int wsize;
	char *wbuf;
	struct list_head poll_pending_link;
	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};

/**
 * struct p9_trans_fd - transport state
 * @rd: reference to file to read from
 * @wr: reference to file to write to
 * @conn: connection state reference
 *
 */

struct p9_trans_fd {
	struct file *rd;
	struct file *wr;
	struct p9_conn conn;
};

static void p9_poll_workfn(struct work_struct *work);

static DEFINE_SPINLOCK(p9_poll_lock);
static LIST_HEAD(p9_poll_pending_list);
static DECLARE_WORK(p9_poll_work, p9_poll_workfn);

static unsigned int p9_ipport_resv_min = P9_DEF_MIN_RESVPORT;
static unsigned int p9_ipport_resv_max = P9_DEF_MAX_RESVPORT;

static void p9_mux_poll_stop(struct p9_conn *m)
{
	unsigned long flags;
	int i;

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		struct p9_poll_wait *pwait = &m->poll_wait[i];

		if (pwait->wait_addr) {
			remove_wait_queue(pwait->wait_addr, &pwait->wait);
			pwait->wait_addr = NULL;
		}
	}

	spin_lock_irqsave(&p9_poll_lock, flags);
	list_del_init(&m->poll_pending_link);
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	flush_work(&p9_poll_work);
}

/**
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 *
 */

static void p9_conn_cancel(struct p9_conn *m, int err)
{
	struct p9_req_t *req, *rtmp;
	LIST_HEAD(cancel_list);

	p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);

	spin_lock(&m->req_lock);

	if (READ_ONCE(m->err)) {
		spin_unlock(&m->req_lock);
		return;
	}

	WRITE_ONCE(m->err, err);
	ASSERT_EXCLUSIVE_WRITER(m->err);

	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
		WRITE_ONCE(req->status, REQ_STATUS_ERROR);
	}
	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
		WRITE_ONCE(req->status, REQ_STATUS_ERROR);
	}

	spin_unlock(&m->req_lock);

	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
		list_del(&req->req_list);
		if (!req->t_err)
			req->t_err = err;
		p9_client_cb(m->client, req, REQ_STATUS_ERROR);
	}
}

static __poll_t
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err)
{
	__poll_t ret;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status == Connected)
		ts = client->trans;

	if (!ts) {
		if (err)
			*err = -EREMOTEIO;
		return EPOLLERR;
	}

	ret = vfs_poll(ts->rd, pt);
	/* with separate fds, take read readiness from rd and write readiness from wr */
	if (ts->rd != ts->wr)
		ret = (ret & ~EPOLLOUT) | (vfs_poll(ts->wr, pt) & ~EPOLLIN);
	return ret;
}

/**
 * p9_fd_read - read from a fd
 * @client: client instance
 * @v: buffer to receive data into
 * @len: size of receive buffer
 *
 */

static int p9_fd_read(struct p9_client *client, void *v, int len)
{
	int ret;
	struct p9_trans_fd *ts = NULL;
	loff_t pos;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->rd->f_flags & O_NONBLOCK))
		p9_debug(P9_DEBUG_ERROR, "blocking read ...\n");

	pos = ts->rd->f_pos;
	ret = kernel_read(ts->rd, v, len, &pos);
	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_read_work - called when there is some data to be read from a transport
 * @work: container of work to be done
 *
 */

static void p9_read_work(struct work_struct *work)
{
	__poll_t n;
	int err;
	struct p9_conn *m;

	m = container_of(work, struct p9_conn, rq);

	if (READ_ONCE(m->err) < 0)
		return;

	p9_debug(P9_DEBUG_TRANS, "start mux %p pos %zd\n", m, m->rc.offset);

	if (!m->rc.sdata) {
		m->rc.sdata = m->tmp_buf;
		m->rc.offset = 0;
		m->rc.capacity = P9_HDRSZ; /* start by reading header */
	}

	clear_bit(Rpending, &m->wsched);
	p9_debug(P9_DEBUG_TRANS, "read mux %p pos %zd size: %zd = %zd\n",
		 m, m->rc.offset, m->rc.capacity,
		 m->rc.capacity - m->rc.offset);
	err = p9_fd_read(m->client, m->rc.sdata + m->rc.offset,
			 m->rc.capacity - m->rc.offset);
	p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN)
		goto end_clear;

	if (err <= 0)
		goto error;

	m->rc.offset += err;

	/* header read in */
	if ((!m->rreq) && (m->rc.offset == m->rc.capacity)) {
		p9_debug(P9_DEBUG_TRANS, "got new header\n");

		/* Header size */
		m->rc.size = P9_HDRSZ;
		err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0);
		if (err) {
			p9_debug(P9_DEBUG_ERROR,
				 "error parsing header: %d\n", err);
			goto error;
		}

		p9_debug(P9_DEBUG_TRANS,
			 "mux %p pkt: size: %d bytes tag: %d\n",
			 m, m->rc.size, m->rc.tag);

		m->rreq = p9_tag_lookup(m->client, m->rc.tag);
		if (!m->rreq || (m->rreq->status != REQ_STATUS_SENT)) {
			p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
				 m->rc.tag);
			err = -EIO;
			goto error;
		}

		if (m->rc.size > m->rreq->rc.capacity) {
			p9_debug(P9_DEBUG_ERROR,
				 "requested packet size too big: %d for tag %d with capacity %zd\n",
				 m->rc.size, m->rc.tag, m->rreq->rc.capacity);
			err = -EIO;
			goto error;
		}

		if (!m->rreq->rc.sdata) {
			p9_debug(P9_DEBUG_ERROR,
				 "No recv fcall for tag %d (req %p), disconnecting!\n",
				 m->rc.tag, m->rreq);
			p9_req_put(m->client, m->rreq);
			m->rreq = NULL;
			err = -EIO;
			goto error;
		}
		m->rc.sdata = m->rreq->rc.sdata;
		memcpy(m->rc.sdata, m->tmp_buf, m->rc.capacity);
		m->rc.capacity = m->rc.size;
	}

	/* packet is read in
	 * not an else because some packets (like clunk) have no payload
	 */
	if ((m->rreq) && (m->rc.offset == m->rc.capacity)) {
		p9_debug(P9_DEBUG_TRANS, "got new packet\n");
		m->rreq->rc.size = m->rc.offset;
		spin_lock(&m->req_lock);
		if (m->rreq->status == REQ_STATUS_SENT) {
			list_del(&m->rreq->req_list);
			p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD);
		} else if (m->rreq->status == REQ_STATUS_FLSHD) {
			/* Ignore replies associated with a cancelled request. */
			p9_debug(P9_DEBUG_TRANS,
				 "Ignore replies associated with a cancelled request\n");
		} else {
			spin_unlock(&m->req_lock);
			p9_debug(P9_DEBUG_ERROR,
				 "Request tag %d errored out while we were reading the reply\n",
				 m->rc.tag);
			err = -EIO;
			goto error;
		}
		spin_unlock(&m->req_lock);
		m->rc.sdata = NULL;
		m->rc.offset = 0;
		m->rc.capacity = 0;
		p9_req_put(m->client, m->rreq);
		m->rreq = NULL;
	}

end_clear:
	clear_bit(Rworksched, &m->wsched);

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = EPOLLIN;
		else
			n = p9_fd_poll(m->client, NULL, NULL);

		if ((n & EPOLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
			schedule_work(&m->rq);
		}
	}

	return;
error:
	p9_conn_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}

/**
 * p9_fd_write - write to a fd
 * @client: client instance
 * @v: buffer to send data from
 * @len: size of send buffer
 *
 */

static int p9_fd_write(struct p9_client *client, void *v, int len)
{
	ssize_t ret;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->wr->f_flags & O_NONBLOCK))
		p9_debug(P9_DEBUG_ERROR, "blocking write ...\n");

	ret = kernel_write(ts->wr, v, len, &ts->wr->f_pos);
	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_write_work - called when a transport can send some data
 * @work: container for work to be done
 *
 */

static void p9_write_work(struct work_struct *work)
{
	__poll_t n;
	int err;
	struct p9_conn *m;
	struct p9_req_t *req;

	m = container_of(work, struct p9_conn, wq);

	if (READ_ONCE(m->err) < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		spin_lock(&m->req_lock);
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			spin_unlock(&m->req_lock);
			return;
		}

		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
				 req_list);
		WRITE_ONCE(req->status, REQ_STATUS_SENT);
		p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
		list_move_tail(&req->req_list, &m->req_list);

		m->wbuf = req->tc.sdata;
		m->wsize = req->tc.size;
		m->wpos = 0;
		p9_req_get(req);
		m->wreq = req;
		spin_unlock(&m->req_lock);
	}

	p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n",
		 m, m->wpos, m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
	p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN)
		goto end_clear;

	if (err < 0)
		goto error;
	else if (err == 0) {
		err = -EREMOTEIO;
		goto error;
	}

	m->wpos += err;
	if (m->wpos == m->wsize) {
		m->wpos = m->wsize = 0;
		p9_req_put(m->client, m->wreq);
		m->wreq = NULL;
	}

end_clear:
	clear_bit(Wworksched, &m->wsched);

	if (m->wsize || !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = EPOLLOUT;
		else
			n = p9_fd_poll(m->client, NULL, NULL);

		if ((n & EPOLLOUT) &&
		    !test_and_set_bit(Wworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
			schedule_work(&m->wq);
		}
	}

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}

static int p9_pollwake(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
{
	struct p9_poll_wait *pwait =
		container_of(wait, struct p9_poll_wait, wait);
	struct p9_conn *m = pwait->conn;
	unsigned long flags;

	spin_lock_irqsave(&p9_poll_lock, flags);
	if (list_empty(&m->poll_pending_link))
		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	schedule_work(&p9_poll_work);
	return 1;
}

/**
 * p9_pollwait - add poll task to the wait queue
 * @filp: file pointer being polled
 * @wait_address: wait_q to block on
 * @p: poll state
 *
 * called by a file's poll operation to add the v9fs-poll task to the
 * file's wait queue
 */

static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	struct p9_conn *m = container_of(p, struct p9_conn, pt);
	struct p9_poll_wait *pwait = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		if (m->poll_wait[i].wait_addr == NULL) {
			pwait = &m->poll_wait[i];
			break;
		}
	}

	if (!pwait) {
		p9_debug(P9_DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	pwait->conn = m;
	pwait->wait_addr = wait_address;
	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
	add_wait_queue(wait_address, &pwait->wait);
}

/**
 * p9_conn_create - initialize the per-session mux data
 * @client: client instance
 *
 * Note: Creates the polling task if this is the first session.
 */

static void p9_conn_create(struct p9_client *client)
{
	__poll_t n;
	struct p9_trans_fd *ts = client->trans;
	struct p9_conn *m = &ts->conn;

	p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize);

	INIT_LIST_HEAD(&m->mux_list);
	m->client = client;

	spin_lock_init(&m->req_lock);
	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	INIT_WORK(&m->rq, p9_read_work);
	INIT_WORK(&m->wq, p9_write_work);
	INIT_LIST_HEAD(&m->poll_pending_link);
	init_poll_funcptr(&m->pt, p9_pollwait);

	n = p9_fd_poll(client, &m->pt, NULL);
	if (n & EPOLLIN) {
		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}

	if (n & EPOLLOUT) {
		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}
}

/**
 * p9_poll_mux - polls a mux and schedules read or write work if necessary
 * @m: connection to poll
 *
 */

static void p9_poll_mux(struct p9_conn *m)
{
	__poll_t n;
	int err = -ECONNRESET;

	if (READ_ONCE(m->err) < 0)
		return;

	n = p9_fd_poll(m->client, NULL, &err);
	if (n & (EPOLLERR | EPOLLHUP | EPOLLNVAL)) {
		p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
		p9_conn_cancel(m, err);
	}

	if (n & EPOLLIN) {
		set_bit(Rpending, &m->wsched);
		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
			schedule_work(&m->rq);
		}
	}

	if (n & EPOLLOUT) {
		set_bit(Wpending, &m->wsched);
		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
		    !test_and_set_bit(Wworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
			schedule_work(&m->wq);
		}
	}
}

/**
 * p9_fd_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully.
 *
 * @client: client instance
 * @req: request to be sent
 *
 */

static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
	__poll_t n;
	int err;
	struct p9_trans_fd *ts = client->trans;
	struct p9_conn *m = &ts->conn;

	p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
		 m, current, &req->tc, req->tc.id);

	spin_lock(&m->req_lock);

	err = READ_ONCE(m->err);
	if (err < 0) {
		spin_unlock(&m->req_lock);
		return err;
	}

	WRITE_ONCE(req->status, REQ_STATUS_UNSENT);
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&m->req_lock);

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = EPOLLOUT;
	else
		n = p9_fd_poll(m->client, NULL, NULL);

	if (n & EPOLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		schedule_work(&m->wq);

	return 0;
}

static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_fd *ts = client->trans;
	struct p9_conn *m = &ts->conn;
	int ret = 1;

	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);

	spin_lock(&m->req_lock);

	if (req->status == REQ_STATUS_UNSENT) {
		list_del(&req->req_list);
		WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
		p9_req_put(client, req);
		ret = 0;
	}
	spin_unlock(&m->req_lock);

	return ret;
}

static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_fd *ts = client->trans;
	struct p9_conn *m = &ts->conn;

	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);

	spin_lock(&m->req_lock);
	/* Ignore cancelled request if message has been received
	 * before lock.
	 */
	if (req->status == REQ_STATUS_RCVD) {
		spin_unlock(&m->req_lock);
		return 0;
	}

	/* we haven't received a response for oldreq,
	 * remove it from the list.
	 */
	list_del(&req->req_list);
	WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
	spin_unlock(&m->req_lock);

	p9_req_put(client, req);

	return 0;
}

static int p9_fd_show_options(struct seq_file *m, struct p9_client *clnt)
{
	if (clnt->trans_mod == &p9_tcp_trans) {
		if (clnt->trans_opts.tcp.port != P9_PORT)
			seq_printf(m, ",port=%u", clnt->trans_opts.tcp.port);
	} else if (clnt->trans_mod == &p9_fd_trans) {
		if (clnt->trans_opts.fd.rfd != ~0)
			seq_printf(m, ",rfd=%u", clnt->trans_opts.fd.rfd);
		if (clnt->trans_opts.fd.wfd != ~0)
			seq_printf(m, ",wfd=%u", clnt->trans_opts.fd.wfd);
	}
	return 0;
}

/**
 * parse_opts - parse mount options into p9_fd_opts structure
 * @params: options string passed from mount
 * @opts: fd transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */

static int parse_opts(char *params, struct p9_fd_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->rfd = ~0;
	opts->wfd = ~0;
	opts->privport = false;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		p9_debug(P9_DEBUG_ERROR,
			 "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;

		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		if ((token != Opt_err) && (token != Opt_privport)) {
			r = match_int(&args[0], &option);
			if (r < 0) {
				p9_debug(P9_DEBUG_ERROR,
					 "integer field, but no integer?\n");
				continue;
			}
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_rfdno:
			opts->rfd = option;
			break;
		case Opt_wfdno:
			opts->wfd = option;
			break;
		case Opt_privport:
			opts->privport = true;
			break;
		default:
			continue;
		}
	}

	kfree(tmp_options);
	return 0;
}

static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
{
	struct p9_trans_fd *ts = kzalloc(sizeof(struct p9_trans_fd),
					 GFP_KERNEL);
	if (!ts)
		return -ENOMEM;

	ts->rd = fget(rfd);
	if (!ts->rd)
		goto out_free_ts;
	if (!(ts->rd->f_mode & FMODE_READ))
		goto out_put_rd;
	/* Prevent workers from hanging on IO when fd is a pipe.
	 * It's technically possible for userspace or concurrent mounts to
	 * modify this flag concurrently, which will likely result in a
	 * broken filesystem. However, just having bad flags here should
	 * not crash the kernel or cause any other sort of bug, so mark this
	 * particular data race as intentional so that tooling (like KCSAN)
	 * can allow it and detect further problems.
	 */
	data_race(ts->rd->f_flags |= O_NONBLOCK);
	ts->wr = fget(wfd);
	if (!ts->wr)
		goto out_put_rd;
	if (!(ts->wr->f_mode & FMODE_WRITE))
		goto out_put_wr;
	data_race(ts->wr->f_flags |= O_NONBLOCK);

	client->trans = ts;
	client->status = Connected;

	return 0;

out_put_wr:
	fput(ts->wr);
out_put_rd:
	fput(ts->rd);
out_free_ts:
	kfree(ts);
	return -EIO;
}

static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
	struct p9_trans_fd *p;
	struct file *file;

	p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
	if (!p) {
		sock_release(csocket);
		return -ENOMEM;
	}

	csocket->sk->sk_allocation = GFP_NOIO;
	csocket->sk->sk_use_task_frag = false;
	file = sock_alloc_file(csocket, 0, NULL);
	if (IS_ERR(file)) {
		pr_err("%s (%d): failed to map fd\n",
		       __func__, task_pid_nr(current));
		kfree(p);
		return PTR_ERR(file);
	}

	get_file(file);
	p->wr = p->rd = file;
	client->trans = p;
	client->status = Connected;

	p->rd->f_flags |= O_NONBLOCK;

	p9_conn_create(client);
	return 0;
}

/**
 * p9_conn_destroy - cancels all pending requests of mux
 * @m: mux to destroy
 *
 */

static void p9_conn_destroy(struct p9_conn *m)
{
	p9_debug(P9_DEBUG_TRANS, "mux %p prev %p next %p\n",
		 m, m->mux_list.prev, m->mux_list.next);

	p9_mux_poll_stop(m);
	cancel_work_sync(&m->rq);
	if (m->rreq) {
		p9_req_put(m->client, m->rreq);
		m->rreq = NULL;
	}
	cancel_work_sync(&m->wq);
	if (m->wreq) {
		p9_req_put(m->client, m->wreq);
		m->wreq = NULL;
	}

	p9_conn_cancel(m, -ECONNRESET);

	m->client = NULL;
}

/**
 * p9_fd_close - shutdown file descriptor transport
 * @client: client instance
 *
 */

static void p9_fd_close(struct p9_client *client)
{
	struct p9_trans_fd *ts;

	if (!client)
		return;

	ts = client->trans;
	if (!ts)
		return;

	client->status = Disconnected;

	p9_conn_destroy(&ts->conn);

	if (ts->rd)
		fput(ts->rd);
	if (ts->wr)
		fput(ts->wr);

	kfree(ts);
}

static int p9_bind_privport(struct socket *sock)
{
	struct sockaddr_storage stor = { 0 };
	int port, err = -EINVAL;

	stor.ss_family = sock->ops->family;
	if (stor.ss_family == AF_INET)
		((struct sockaddr_in *)&stor)->sin_addr.s_addr = htonl(INADDR_ANY);
	else
		((struct sockaddr_in6 *)&stor)->sin6_addr = in6addr_any;
	/* try reserved ports from the top down until bind succeeds or fails
	 * with something other than -EADDRINUSE
	 */
	for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) {
		if (stor.ss_family == AF_INET)
			((struct sockaddr_in *)&stor)->sin_port = htons((ushort)port);
		else
			((struct sockaddr_in6 *)&stor)->sin6_port = htons((ushort)port);
		err = kernel_bind(sock, (struct sockaddr *)&stor, sizeof(stor));
		if (err != -EADDRINUSE)
			break;
	}
	return err;
}

static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
	int err;
	char port_str[6];
	struct socket *csocket;
	struct sockaddr_storage stor = { 0 };
	struct p9_fd_opts opts;

	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	if (!addr)
		return -EINVAL;

	sprintf(port_str, "%u", opts.port);
	err = inet_pton_with_scope(current->nsproxy->net_ns, AF_UNSPEC, addr,
				   port_str, &stor);
	if (err < 0)
		return err;

	csocket = NULL;

	client->trans_opts.tcp.port = opts.port;
	client->trans_opts.tcp.privport = opts.privport;
	err = __sock_create(current->nsproxy->net_ns, stor.ss_family,
			    SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
	if (err) {
		pr_err("%s (%d): problem creating socket\n",
		       __func__, task_pid_nr(current));
		return err;
	}

	if (opts.privport) {
		err = p9_bind_privport(csocket);
		if (err < 0) {
			pr_err("%s (%d): problem binding to privport\n",
			       __func__, task_pid_nr(current));
			sock_release(csocket);
			return err;
		}
	}

	err = READ_ONCE(csocket->ops)->connect(csocket,
					       (struct sockaddr *)&stor,
					       sizeof(stor), 0);
	if (err < 0) {
		pr_err("%s (%d): problem connecting socket to %s\n",
		       __func__, task_pid_nr(current), addr);
		sock_release(csocket);
		return err;
	}

	return p9_socket_open(client, csocket);
}

static int
p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_un sun_server;

	csocket = NULL;

	if (!addr || !strlen(addr))
		return -EINVAL;

	if (strlen(addr) >= UNIX_PATH_MAX) {
		pr_err("%s (%d): address too long: %s\n",
		       __func__, task_pid_nr(current), addr);
		return -ENAMETOOLONG;
	}

	sun_server.sun_family = PF_UNIX;
	strcpy(sun_server.sun_path, addr);
	err = __sock_create(current->nsproxy->net_ns, PF_UNIX,
			    SOCK_STREAM, 0, &csocket, 1);
	if (err < 0) {
		pr_err("%s (%d): problem creating socket\n",
		       __func__, task_pid_nr(current));

		return err;
	}
	err = READ_ONCE(csocket->ops)->connect(csocket, (struct sockaddr *)&sun_server,
					       sizeof(struct sockaddr_un) - 1, 0);
	if (err < 0) {
		pr_err("%s (%d): problem connecting socket: %s: %d\n",
		       __func__, task_pid_nr(current), addr, err);
		sock_release(csocket);
		return err;
	}

	return p9_socket_open(client, csocket);
}

static int
p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_fd_opts opts;

	err = parse_opts(args, &opts);
	if (err < 0)
		return err;
	client->trans_opts.fd.rfd = opts.rfd;
	client->trans_opts.fd.wfd = opts.wfd;

	if (opts.rfd == ~0 || opts.wfd == ~0) {
		pr_err("Insufficient options for proto=fd\n");
		return -ENOPROTOOPT;
	}

	err = p9_fd_open(client, opts.rfd, opts.wfd);
	if (err < 0)
		return err;

	p9_conn_create(client);

	return 0;
}

static struct p9_trans_module p9_tcp_trans = {
	.name = "tcp",
	.maxsize = MAX_SOCK_BUF,
	.pooled_rbuffers = false,
	.def = 0,
	.create = p9_fd_create_tcp,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.cancelled = p9_fd_cancelled,
	.show_options = p9_fd_show_options,
	.owner = THIS_MODULE,
};
MODULE_ALIAS_9P("tcp");

static struct p9_trans_module p9_unix_trans = {
	.name = "unix",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create_unix,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.cancelled = p9_fd_cancelled,
	.show_options = p9_fd_show_options,
	.owner = THIS_MODULE,
};
MODULE_ALIAS_9P("unix");

static struct p9_trans_module p9_fd_trans = {
	.name = "fd",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.cancelled = p9_fd_cancelled,
	.show_options = p9_fd_show_options,
	.owner = THIS_MODULE,
};
MODULE_ALIAS_9P("fd");

/**
 * p9_poll_workfn - poll worker thread
 * @work: work queue
 *
 * polls all v9fs transports for new events and queues the appropriate
 * work to the work queue
 *
 */

static void p9_poll_workfn(struct work_struct *work)
{
	unsigned long flags;

	p9_debug(P9_DEBUG_TRANS, "start %p\n", current);

	spin_lock_irqsave(&p9_poll_lock, flags);
	while (!list_empty(&p9_poll_pending_list)) {
		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
							struct p9_conn,
							poll_pending_link);
		list_del_init(&conn->poll_pending_link);
		spin_unlock_irqrestore(&p9_poll_lock, flags);

		p9_poll_mux(conn);

		spin_lock_irqsave(&p9_poll_lock, flags);
	}
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	p9_debug(P9_DEBUG_TRANS, "finish\n");
}

static int __init p9_trans_fd_init(void)
{
	v9fs_register_trans(&p9_tcp_trans);
	v9fs_register_trans(&p9_unix_trans);
	v9fs_register_trans(&p9_fd_trans);

	return 0;
}

static void __exit p9_trans_fd_exit(void)
{
	flush_work(&p9_poll_work);
	v9fs_unregister_trans(&p9_tcp_trans);
	v9fs_unregister_trans(&p9_unix_trans);
	v9fs_unregister_trans(&p9_fd_trans);
}

module_init(p9_trans_fd_init);
module_exit(p9_trans_fd_exit);

MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
MODULE_DESCRIPTION("Filedescriptor Transport for 9P");
MODULE_LICENSE("GPL");