1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * vsock_test - vsock.ko test suite
4  *
5  * Copyright (C) 2017 Red Hat, Inc.
6  *
7  * Author: Stefan Hajnoczi <stefanha@redhat.com>
8  */
9 
10 #include <getopt.h>
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <errno.h>
15 #include <unistd.h>
16 #include <linux/kernel.h>
17 #include <sys/types.h>
18 #include <sys/socket.h>
19 #include <time.h>
20 #include <sys/mman.h>
21 #include <poll.h>
22 #include <signal.h>
23 #include <sys/ioctl.h>
24 #include <linux/time64.h>
25 #include <pthread.h>
26 #include <fcntl.h>
27 #include <linux/sockios.h>
28 
29 #include "vsock_test_zerocopy.h"
30 #include "timeout.h"
31 #include "control.h"
32 #include "util.h"
33 
34 /* Basic messages for control_writeulong(), control_readulong() */
35 #define CONTROL_CONTINUE	1
36 #define CONTROL_DONE		0
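
/* A sketch of how these values are typically used (assuming the
 * control_writeulong()/control_readulong() helpers from control.h simply
 * exchange an unsigned long over the control connection): the client drives
 * a loop and the server keeps iterating until it reads CONTROL_DONE.
 *
 *   client                                server
 *   ------                                ------
 *   control_writeulong(CONTROL_CONTINUE);
 *                                         while (control_readulong() == CONTROL_CONTINUE)
 *                                                 ...one iteration of work...
 *   control_writeulong(CONTROL_DONE);
 *
 * See test_stream_leak_acceptq_client()/_server() below for a real user of
 * this pattern.
 */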
37 
38 static void test_stream_connection_reset(const struct test_opts *opts)
39 {
40 	union {
41 		struct sockaddr sa;
42 		struct sockaddr_vm svm;
43 	} addr = {
44 		.svm = {
45 			.svm_family = AF_VSOCK,
46 			.svm_port = opts->peer_port,
47 			.svm_cid = opts->peer_cid,
48 		},
49 	};
50 	int ret;
51 	int fd;
52 
53 	fd = socket(AF_VSOCK, SOCK_STREAM, 0);
54 
55 	timeout_begin(TIMEOUT);
56 	do {
57 		ret = connect(fd, &addr.sa, sizeof(addr.svm));
58 		timeout_check("connect");
59 	} while (ret < 0 && errno == EINTR);
60 	timeout_end();
61 
62 	if (ret != -1) {
63 		fprintf(stderr, "expected connect(2) failure, got %d\n", ret);
64 		exit(EXIT_FAILURE);
65 	}
66 	if (errno != ECONNRESET) {
67 		fprintf(stderr, "unexpected connect(2) errno %d\n", errno);
68 		exit(EXIT_FAILURE);
69 	}
70 
71 	close(fd);
72 }
73 
74 static void test_stream_bind_only_client(const struct test_opts *opts)
75 {
76 	union {
77 		struct sockaddr sa;
78 		struct sockaddr_vm svm;
79 	} addr = {
80 		.svm = {
81 			.svm_family = AF_VSOCK,
82 			.svm_port = opts->peer_port,
83 			.svm_cid = opts->peer_cid,
84 		},
85 	};
86 	int ret;
87 	int fd;
88 
89 	/* Wait for the server to be ready */
90 	control_expectln("BIND");
91 
92 	fd = socket(AF_VSOCK, SOCK_STREAM, 0);
93 
94 	timeout_begin(TIMEOUT);
95 	do {
96 		ret = connect(fd, &addr.sa, sizeof(addr.svm));
97 		timeout_check("connect");
98 	} while (ret < 0 && errno == EINTR);
99 	timeout_end();
100 
101 	if (ret != -1) {
102 		fprintf(stderr, "expected connect(2) failure, got %d\n", ret);
103 		exit(EXIT_FAILURE);
104 	}
105 	if (errno != ECONNRESET) {
106 		fprintf(stderr, "unexpected connect(2) errno %d\n", errno);
107 		exit(EXIT_FAILURE);
108 	}
109 
110 	/* Notify the server that the client has finished */
111 	control_writeln("DONE");
112 
113 	close(fd);
114 }
115 
116 static void test_stream_bind_only_server(const struct test_opts *opts)
117 {
118 	int fd;
119 
120 	fd = vsock_bind(VMADDR_CID_ANY, opts->peer_port, SOCK_STREAM);
121 
122 	/* Notify the client that the server is ready */
123 	control_writeln("BIND");
124 
125 	/* Wait for the client to finish */
126 	control_expectln("DONE");
127 
128 	close(fd);
129 }
130 
131 static void test_stream_client_close_client(const struct test_opts *opts)
132 {
133 	int fd;
134 
135 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
136 	if (fd < 0) {
137 		perror("connect");
138 		exit(EXIT_FAILURE);
139 	}
140 
141 	send_byte(fd, 1, 0);
142 	close(fd);
143 }
144 
145 static void test_stream_client_close_server(const struct test_opts *opts)
146 {
147 	int fd;
148 
149 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
150 	if (fd < 0) {
151 		perror("accept");
152 		exit(EXIT_FAILURE);
153 	}
154 
155 	/* Wait for the remote to close the connection before checking
156 	 * for the -EPIPE error on send.
157 	 */
158 	vsock_wait_remote_close(fd);
159 
160 	send_byte(fd, -EPIPE, 0);
161 	recv_byte(fd, 1, 0);
162 	recv_byte(fd, 0, 0);
163 	close(fd);
164 }
165 
166 static void test_stream_server_close_client(const struct test_opts *opts)
167 {
168 	int fd;
169 
170 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
171 	if (fd < 0) {
172 		perror("connect");
173 		exit(EXIT_FAILURE);
174 	}
175 
176 	/* Wait for the remote to close the connection before checking
177 	 * for the -EPIPE error on send.
178 	 */
179 	vsock_wait_remote_close(fd);
180 
181 	send_byte(fd, -EPIPE, 0);
182 	recv_byte(fd, 1, 0);
183 	recv_byte(fd, 0, 0);
184 	close(fd);
185 }
186 
187 static void test_stream_server_close_server(const struct test_opts *opts)
188 {
189 	int fd;
190 
191 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
192 	if (fd < 0) {
193 		perror("accept");
194 		exit(EXIT_FAILURE);
195 	}
196 
197 	send_byte(fd, 1, 0);
198 	close(fd);
199 }
200 
201 /* With the standard socket sizes, VMCI is able to support about 100
202  * concurrent stream connections.
203  */
204 #define MULTICONN_NFDS 100
205 
206 static void test_stream_multiconn_client(const struct test_opts *opts)
207 {
208 	int fds[MULTICONN_NFDS];
209 	int i;
210 
211 	for (i = 0; i < MULTICONN_NFDS; i++) {
212 		fds[i] = vsock_stream_connect(opts->peer_cid, opts->peer_port);
213 		if (fds[i] < 0) {
214 			perror("connect");
215 			exit(EXIT_FAILURE);
216 		}
217 	}
218 
219 	for (i = 0; i < MULTICONN_NFDS; i++) {
220 		if (i % 2)
221 			recv_byte(fds[i], 1, 0);
222 		else
223 			send_byte(fds[i], 1, 0);
224 	}
225 
226 	for (i = 0; i < MULTICONN_NFDS; i++)
227 		close(fds[i]);
228 }
229 
230 static void test_stream_multiconn_server(const struct test_opts *opts)
231 {
232 	int fds[MULTICONN_NFDS];
233 	int i;
234 
235 	for (i = 0; i < MULTICONN_NFDS; i++) {
236 		fds[i] = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
237 		if (fds[i] < 0) {
238 			perror("accept");
239 			exit(EXIT_FAILURE);
240 		}
241 	}
242 
243 	for (i = 0; i < MULTICONN_NFDS; i++) {
244 		if (i % 2)
245 			send_byte(fds[i], 1, 0);
246 		else
247 			recv_byte(fds[i], 1, 0);
248 	}
249 
250 	for (i = 0; i < MULTICONN_NFDS; i++)
251 		close(fds[i]);
252 }
253 
254 #define MSG_PEEK_BUF_LEN 64
255 
256 static void test_msg_peek_client(const struct test_opts *opts,
257 				 bool seqpacket)
258 {
259 	unsigned char buf[MSG_PEEK_BUF_LEN];
260 	int fd;
261 	int i;
262 
263 	if (seqpacket)
264 		fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
265 	else
266 		fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
267 
268 	if (fd < 0) {
269 		perror("connect");
270 		exit(EXIT_FAILURE);
271 	}
272 
273 	for (i = 0; i < sizeof(buf); i++)
274 		buf[i] = rand() & 0xFF;
275 
276 	control_expectln("SRVREADY");
277 
278 	send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
279 
280 	close(fd);
281 }
282 
283 static void test_msg_peek_server(const struct test_opts *opts,
284 				 bool seqpacket)
285 {
286 	unsigned char buf_half[MSG_PEEK_BUF_LEN / 2];
287 	unsigned char buf_normal[MSG_PEEK_BUF_LEN];
288 	unsigned char buf_peek[MSG_PEEK_BUF_LEN];
289 	int fd;
290 
291 	if (seqpacket)
292 		fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
293 	else
294 		fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
295 
296 	if (fd < 0) {
297 		perror("accept");
298 		exit(EXIT_FAILURE);
299 	}
300 
301 	/* Peek from empty socket. */
302 	recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK | MSG_DONTWAIT,
303 		 -EAGAIN);
304 
305 	control_writeln("SRVREADY");
306 
307 	/* Peek part of data. */
308 	recv_buf(fd, buf_half, sizeof(buf_half), MSG_PEEK, sizeof(buf_half));
309 
310 	/* Peek whole data. */
311 	recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK, sizeof(buf_peek));
312 
313 	/* Compare partial and full peek. */
314 	if (memcmp(buf_half, buf_peek, sizeof(buf_half))) {
315 		fprintf(stderr, "Partial peek data mismatch\n");
316 		exit(EXIT_FAILURE);
317 	}
318 
319 	if (seqpacket) {
320 		/* This type of socket supports the MSG_TRUNC flag,
321 		 * so check it together with MSG_PEEK. We must get the
322 		 * full length of the message.
323 		 */
324 		recv_buf(fd, buf_half, sizeof(buf_half), MSG_PEEK | MSG_TRUNC,
325 			 sizeof(buf_peek));
326 	}
327 
328 	recv_buf(fd, buf_normal, sizeof(buf_normal), 0, sizeof(buf_normal));
329 
330 	/* Compare full peek and normal read. */
331 	if (memcmp(buf_peek, buf_normal, sizeof(buf_peek))) {
332 		fprintf(stderr, "Full peek data mismatch\n");
333 		exit(EXIT_FAILURE);
334 	}
335 
336 	close(fd);
337 }
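
/* The MSG_PEEK expectations above boil down to standard recv(2) semantics,
 * sketched here with plain calls instead of the recv_buf() helper: peeking
 * returns data without dequeueing it, so a later plain read sees the same
 * bytes.
 *
 *   char a[64], b[64];
 *
 *   recv(fd, a, sizeof(a), MSG_PEEK);    // data stays queued
 *   recv(fd, b, sizeof(b), 0);           // returns the same bytes again
 *                                        // memcmp(a, b, ...) == 0 expected
 *
 * For SOCK_SEQPACKET, MSG_PEEK | MSG_TRUNC additionally makes the receive
 * call report the full message length even if the supplied buffer is smaller.
 */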
338 
339 static void test_stream_msg_peek_client(const struct test_opts *opts)
340 {
341 	return test_msg_peek_client(opts, false);
342 }
343 
344 static void test_stream_msg_peek_server(const struct test_opts *opts)
345 {
346 	return test_msg_peek_server(opts, false);
347 }
348 
349 #define SOCK_BUF_SIZE (2 * 1024 * 1024)
350 #define MAX_MSG_PAGES 4
351 
352 static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
353 {
354 	unsigned long curr_hash;
355 	size_t max_msg_size;
356 	int page_size;
357 	int msg_count;
358 	int fd;
359 
360 	fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
361 	if (fd < 0) {
362 		perror("connect");
363 		exit(EXIT_FAILURE);
364 	}
365 
366 	/* Wait until the receiver sets the buffer size. */
367 	control_expectln("SRVREADY");
368 
369 	curr_hash = 0;
370 	page_size = getpagesize();
371 	max_msg_size = MAX_MSG_PAGES * page_size;
372 	msg_count = SOCK_BUF_SIZE / max_msg_size;
373 
374 	for (int i = 0; i < msg_count; i++) {
375 		size_t buf_size;
376 		int flags;
377 		void *buf;
378 
379 		/* Use "small" buffers and "big" buffers. */
380 		if (i & 1)
381 			buf_size = page_size +
382 					(rand() % (max_msg_size - page_size));
383 		else
384 			buf_size = 1 + (rand() % page_size);
385 
386 		buf = malloc(buf_size);
387 
388 		if (!buf) {
389 			perror("malloc");
390 			exit(EXIT_FAILURE);
391 		}
392 
393 		memset(buf, rand() & 0xff, buf_size);
394 		/* Set MSG_EOR on at least one message, plus some at random. */
395 		if (i == (msg_count / 2) || (rand() & 1)) {
396 			flags = MSG_EOR;
397 			curr_hash++;
398 		} else {
399 			flags = 0;
400 		}
401 
402 		send_buf(fd, buf, buf_size, flags, buf_size);
403 
404 		/*
405 		 * The hash sum is computed in the same way at both the
406 		 * client and the server:
407 		 * H += hash('message data')
408 		 * Such a hash covers both data integrity and message
409 		 * bounds. After the data exchange, both sums are compared
410 		 * over the control socket; if the message bounds weren't
411 		 * broken, the two values must be equal.
412 		 */
413 		curr_hash += hash_djb2(buf, buf_size);
414 		free(buf);
415 	}
416 
417 	control_writeln("SENDDONE");
418 	control_writeulong(curr_hash);
419 	close(fd);
420 }
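
/* For reference, a minimal sketch of the checksum used above. hash_djb2() is
 * provided by util.h; this sketch assumes it implements the classic djb2
 * algorithm (hash = hash * 33 + byte, seeded with 5381):
 *
 *   static unsigned long djb2(const void *data, size_t len)
 *   {
 *           const unsigned char *p = data;
 *           unsigned long hash = 5381;
 *
 *           while (len--)
 *                   hash = ((hash << 5) + hash) + *p++;
 *
 *           return hash;
 *   }
 *
 * Both peers accumulate H += hash(message) and add 1 for every MSG_EOR they
 * see, so a mismatch of the final sums indicates either corrupted data or
 * broken message boundaries.
 */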
421 
422 static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
423 {
424 	unsigned long long sock_buf_size;
425 	unsigned long remote_hash;
426 	unsigned long curr_hash;
427 	int fd;
428 	struct msghdr msg = {0};
429 	struct iovec iov = {0};
430 
431 	fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
432 	if (fd < 0) {
433 		perror("accept");
434 		exit(EXIT_FAILURE);
435 	}
436 
437 	sock_buf_size = SOCK_BUF_SIZE;
438 
439 	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
440 			     sock_buf_size,
441 			     "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
442 
443 	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
444 			     sock_buf_size,
445 			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
446 
447 	/* Ready to receive data. */
448 	control_writeln("SRVREADY");
449 	/* Wait until the peer has sent all of the data. */
450 	control_expectln("SENDDONE");
451 	iov.iov_len = MAX_MSG_PAGES * getpagesize();
452 	iov.iov_base = malloc(iov.iov_len);
453 	if (!iov.iov_base) {
454 		perror("malloc");
455 		exit(EXIT_FAILURE);
456 	}
457 
458 	msg.msg_iov = &iov;
459 	msg.msg_iovlen = 1;
460 
461 	curr_hash = 0;
462 
463 	while (1) {
464 		ssize_t recv_size;
465 
466 		recv_size = recvmsg(fd, &msg, 0);
467 
468 		if (!recv_size)
469 			break;
470 
471 		if (recv_size < 0) {
472 			perror("recvmsg");
473 			exit(EXIT_FAILURE);
474 		}
475 
476 		if (msg.msg_flags & MSG_EOR)
477 			curr_hash++;
478 
479 		curr_hash += hash_djb2(msg.msg_iov[0].iov_base, recv_size);
480 	}
481 
482 	free(iov.iov_base);
483 	close(fd);
484 	remote_hash = control_readulong();
485 
486 	if (curr_hash != remote_hash) {
487 		fprintf(stderr, "Message bounds broken\n");
488 		exit(EXIT_FAILURE);
489 	}
490 }
491 
492 #define MESSAGE_TRUNC_SZ 32
493 static void test_seqpacket_msg_trunc_client(const struct test_opts *opts)
494 {
495 	int fd;
496 	char buf[MESSAGE_TRUNC_SZ];
497 
498 	fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
499 	if (fd < 0) {
500 		perror("connect");
501 		exit(EXIT_FAILURE);
502 	}
503 
504 	send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
505 
506 	control_writeln("SENDDONE");
507 	close(fd);
508 }
509 
510 static void test_seqpacket_msg_trunc_server(const struct test_opts *opts)
511 {
512 	int fd;
513 	char buf[MESSAGE_TRUNC_SZ / 2];
514 	struct msghdr msg = {0};
515 	struct iovec iov = {0};
516 
517 	fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
518 	if (fd < 0) {
519 		perror("accept");
520 		exit(EXIT_FAILURE);
521 	}
522 
523 	control_expectln("SENDDONE");
524 	iov.iov_base = buf;
525 	iov.iov_len = sizeof(buf);
526 	msg.msg_iov = &iov;
527 	msg.msg_iovlen = 1;
528 
529 	ssize_t ret = recvmsg(fd, &msg, MSG_TRUNC);
530 
531 	if (ret != MESSAGE_TRUNC_SZ) {
532 		printf("%zi\n", ret);
533 		perror("MSG_TRUNC doesn't work");
534 		exit(EXIT_FAILURE);
535 	}
536 
537 	if (!(msg.msg_flags & MSG_TRUNC)) {
538 		fprintf(stderr, "MSG_TRUNC expected\n");
539 		exit(EXIT_FAILURE);
540 	}
541 
542 	close(fd);
543 }
544 
545 static time_t current_nsec(void)
546 {
547 	struct timespec ts;
548 
549 	if (clock_gettime(CLOCK_REALTIME, &ts)) {
550 		perror("clock_gettime(3) failed");
551 		exit(EXIT_FAILURE);
552 	}
553 
554 	return (ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec;
555 }
556 
557 #define RCVTIMEO_TIMEOUT_SEC 1
558 #define READ_OVERHEAD_NSEC 250000000 /* 0.25 sec */
559 
560 static void test_seqpacket_timeout_client(const struct test_opts *opts)
561 {
562 	int fd;
563 	struct timeval tv;
564 	char dummy;
565 	time_t read_enter_ns;
566 	time_t read_overhead_ns;
567 
568 	fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
569 	if (fd < 0) {
570 		perror("connect");
571 		exit(EXIT_FAILURE);
572 	}
573 
574 	tv.tv_sec = RCVTIMEO_TIMEOUT_SEC;
575 	tv.tv_usec = 0;
576 
577 	setsockopt_timeval_check(fd, SOL_SOCKET, SO_RCVTIMEO, tv,
578 				 "setsockopt(SO_RCVTIMEO)");
579 
580 	read_enter_ns = current_nsec();
581 
582 	if (read(fd, &dummy, sizeof(dummy)) != -1) {
583 		fprintf(stderr,
584 			"expected 'dummy' read(2) failure\n");
585 		exit(EXIT_FAILURE);
586 	}
587 
588 	if (errno != EAGAIN) {
589 		perror("EAGAIN expected");
590 		exit(EXIT_FAILURE);
591 	}
592 
593 	read_overhead_ns = current_nsec() - read_enter_ns -
594 			   NSEC_PER_SEC * RCVTIMEO_TIMEOUT_SEC;
595 
596 	if (read_overhead_ns > READ_OVERHEAD_NSEC) {
597 		fprintf(stderr,
598 			"too much time in read(2), %lu > %i ns\n",
599 			read_overhead_ns, READ_OVERHEAD_NSEC);
600 		exit(EXIT_FAILURE);
601 	}
602 
603 	control_writeln("WAITDONE");
604 	close(fd);
605 }
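
/* setsockopt_timeval_check() is a util.h helper; assuming it is a thin
 * wrapper around setsockopt(2), the timeout armed above corresponds to the
 * plain call:
 *
 *   struct timeval tv = { .tv_sec = RCVTIMEO_TIMEOUT_SEC, .tv_usec = 0 };
 *
 *   if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))
 *           perror("setsockopt(SO_RCVTIMEO)");
 *
 * With SO_RCVTIMEO set, a blocking read(2) on an empty socket is expected to
 * return -1 with errno EAGAIN after roughly tv_sec seconds, which is what the
 * overhead measurement above relies on.
 */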
606 
607 static void test_seqpacket_timeout_server(const struct test_opts *opts)
608 {
609 	int fd;
610 
611 	fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
612 	if (fd < 0) {
613 		perror("accept");
614 		exit(EXIT_FAILURE);
615 	}
616 
617 	control_expectln("WAITDONE");
618 	close(fd);
619 }
620 
621 static void test_seqpacket_bigmsg_client(const struct test_opts *opts)
622 {
623 	unsigned long long sock_buf_size;
624 	size_t buf_size;
625 	socklen_t len;
626 	void *data;
627 	int fd;
628 
629 	len = sizeof(sock_buf_size);
630 
631 	fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
632 	if (fd < 0) {
633 		perror("connect");
634 		exit(EXIT_FAILURE);
635 	}
636 
637 	if (getsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
638 		       &sock_buf_size, &len)) {
639 		perror("getsockopt");
640 		exit(EXIT_FAILURE);
641 	}
642 
643 	sock_buf_size++;
644 
645 	/* size_t can be < unsigned long long */
646 	buf_size = (size_t)sock_buf_size;
647 	if (buf_size != sock_buf_size) {
648 		fprintf(stderr, "Returned BUFFER_SIZE too large\n");
649 		exit(EXIT_FAILURE);
650 	}
651 
652 	data = malloc(buf_size);
653 	if (!data) {
654 		perror("malloc");
655 		exit(EXIT_FAILURE);
656 	}
657 
658 	send_buf(fd, data, buf_size, 0, -EMSGSIZE);
659 
660 	control_writeln("CLISENT");
661 
662 	free(data);
663 	close(fd);
664 }
665 
666 static void test_seqpacket_bigmsg_server(const struct test_opts *opts)
667 {
668 	int fd;
669 
670 	fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
671 	if (fd < 0) {
672 		perror("accept");
673 		exit(EXIT_FAILURE);
674 	}
675 
676 	control_expectln("CLISENT");
677 
678 	close(fd);
679 }
680 
681 #define BUF_PATTERN_1 'a'
682 #define BUF_PATTERN_2 'b'
683 
684 static void test_seqpacket_invalid_rec_buffer_client(const struct test_opts *opts)
685 {
686 	int fd;
687 	unsigned char *buf1;
688 	unsigned char *buf2;
689 	int buf_size = getpagesize() * 3;
690 
691 	fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
692 	if (fd < 0) {
693 		perror("connect");
694 		exit(EXIT_FAILURE);
695 	}
696 
697 	buf1 = malloc(buf_size);
698 	if (!buf1) {
699 		perror("'malloc()' for 'buf1'");
700 		exit(EXIT_FAILURE);
701 	}
702 
703 	buf2 = malloc(buf_size);
704 	if (!buf2) {
705 		perror("'malloc()' for 'buf2'");
706 		exit(EXIT_FAILURE);
707 	}
708 
709 	memset(buf1, BUF_PATTERN_1, buf_size);
710 	memset(buf2, BUF_PATTERN_2, buf_size);
711 
712 	send_buf(fd, buf1, buf_size, 0, buf_size);
713 
714 	send_buf(fd, buf2, buf_size, 0, buf_size);
715 
716 	close(fd);
717 }
718 
719 static void test_seqpacket_invalid_rec_buffer_server(const struct test_opts *opts)
720 {
721 	int fd;
722 	unsigned char *broken_buf;
723 	unsigned char *valid_buf;
724 	int page_size = getpagesize();
725 	int buf_size = page_size * 3;
726 	ssize_t res;
727 	int prot = PROT_READ | PROT_WRITE;
728 	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
729 	int i;
730 
731 	fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
732 	if (fd < 0) {
733 		perror("accept");
734 		exit(EXIT_FAILURE);
735 	}
736 
737 	/* Setup first buffer. */
738 	broken_buf = mmap(NULL, buf_size, prot, flags, -1, 0);
739 	if (broken_buf == MAP_FAILED) {
740 		perror("mmap for 'broken_buf'");
741 		exit(EXIT_FAILURE);
742 	}
743 
744 	/* Unmap "hole" in buffer. */
745 	if (munmap(broken_buf + page_size, page_size)) {
746 		perror("'broken_buf' setup");
747 		exit(EXIT_FAILURE);
748 	}
749 
750 	valid_buf = mmap(NULL, buf_size, prot, flags, -1, 0);
751 	if (valid_buf == MAP_FAILED) {
752 		perror("mmap for 'valid_buf'");
753 		exit(EXIT_FAILURE);
754 	}
755 
756 	/* Try to fill buffer with unmapped middle. */
757 	res = read(fd, broken_buf, buf_size);
758 	if (res != -1) {
759 		fprintf(stderr,
760 			"expected 'broken_buf' read(2) failure, got %zi\n",
761 			res);
762 		exit(EXIT_FAILURE);
763 	}
764 
765 	if (errno != EFAULT) {
766 		perror("unexpected errno of 'broken_buf'");
767 		exit(EXIT_FAILURE);
768 	}
769 
770 	/* Try to fill valid buffer. */
771 	res = read(fd, valid_buf, buf_size);
772 	if (res < 0) {
773 		perror("unexpected 'valid_buf' read(2) failure");
774 		exit(EXIT_FAILURE);
775 	}
776 
777 	if (res != buf_size) {
778 		fprintf(stderr,
779 			"invalid 'valid_buf' read(2), expected %i, got %zi\n",
780 			buf_size, res);
781 		exit(EXIT_FAILURE);
782 	}
783 
784 	for (i = 0; i < buf_size; i++) {
785 		if (valid_buf[i] != BUF_PATTERN_2) {
786 			fprintf(stderr,
787 				"invalid pattern for 'valid_buf' at %i, expected %hhX, got %hhX\n",
788 				i, BUF_PATTERN_2, valid_buf[i]);
789 			exit(EXIT_FAILURE);
790 		}
791 	}
792 
793 	/* Unmap buffers. */
794 	munmap(broken_buf, page_size);
795 	munmap(broken_buf + page_size * 2, page_size);
796 	munmap(valid_buf, buf_size);
797 	close(fd);
798 }
799 
800 #define RCVLOWAT_BUF_SIZE 128
801 
802 static void test_stream_poll_rcvlowat_server(const struct test_opts *opts)
803 {
804 	int fd;
805 	int i;
806 
807 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
808 	if (fd < 0) {
809 		perror("accept");
810 		exit(EXIT_FAILURE);
811 	}
812 
813 	/* Send 1 byte. */
814 	send_byte(fd, 1, 0);
815 
816 	control_writeln("SRVSENT");
817 
818 	/* Wait until client is ready to receive rest of data. */
819 	control_expectln("CLNSENT");
820 
821 	for (i = 0; i < RCVLOWAT_BUF_SIZE - 1; i++)
822 		send_byte(fd, 1, 0);
823 
824 	/* Keep socket in active state. */
825 	control_expectln("POLLDONE");
826 
827 	close(fd);
828 }
829 
830 static void test_stream_poll_rcvlowat_client(const struct test_opts *opts)
831 {
832 	int lowat_val = RCVLOWAT_BUF_SIZE;
833 	char buf[RCVLOWAT_BUF_SIZE];
834 	struct pollfd fds;
835 	short poll_flags;
836 	int fd;
837 
838 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
839 	if (fd < 0) {
840 		perror("connect");
841 		exit(EXIT_FAILURE);
842 	}
843 
844 	setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT,
845 			     lowat_val, "setsockopt(SO_RCVLOWAT)");
846 
847 	control_expectln("SRVSENT");
848 
849 	/* At this point, server sent 1 byte. */
850 	fds.fd = fd;
851 	poll_flags = POLLIN | POLLRDNORM;
852 	fds.events = poll_flags;
853 
854 	/* Try to wait for 1 sec. */
855 	if (poll(&fds, 1, 1000) < 0) {
856 		perror("poll");
857 		exit(EXIT_FAILURE);
858 	}
859 
860 	/* poll() must report no events. */
861 	if (fds.revents) {
862 		fprintf(stderr, "Unexpected poll result %hx\n",
863 			fds.revents);
864 		exit(EXIT_FAILURE);
865 	}
866 
867 	/* Tell server to send rest of data. */
868 	control_writeln("CLNSENT");
869 
870 	/* Poll for data. */
871 	if (poll(&fds, 1, 10000) < 0) {
872 		perror("poll");
873 		exit(EXIT_FAILURE);
874 	}
875 
876 	/* Only these two bits are expected. */
877 	if (fds.revents != poll_flags) {
878 		fprintf(stderr, "Unexpected poll result %hx\n",
879 			fds.revents);
880 		exit(EXIT_FAILURE);
881 	}
882 
883 	/* Use MSG_DONTWAIT: if the call would otherwise block,
884 	 * EAGAIN is returned.
885 	 */
886 	recv_buf(fd, buf, sizeof(buf), MSG_DONTWAIT, RCVLOWAT_BUF_SIZE);
887 
888 	control_writeln("POLLDONE");
889 
890 	close(fd);
891 }
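
/* The client above drives the SO_RCVLOWAT/poll() interaction through util.h
 * helpers. A stand-alone sketch of the same idea, using only standard socket
 * calls and with error handling omitted for brevity:
 *
 *   int lowat = RCVLOWAT_BUF_SIZE;
 *   struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM };
 *
 *   setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 *
 *   poll(&pfd, 1, 1000);   // no events while fewer than 'lowat' bytes queued
 *   ...                    // peer sends the remaining bytes
 *   poll(&pfd, 1, 10000);  // POLLIN | POLLRDNORM are now reported
 */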
892 
893 #define INV_BUF_TEST_DATA_LEN 512
894 
895 static void test_inv_buf_client(const struct test_opts *opts, bool stream)
896 {
897 	unsigned char data[INV_BUF_TEST_DATA_LEN] = {0};
898 	ssize_t expected_ret;
899 	int fd;
900 
901 	if (stream)
902 		fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
903 	else
904 		fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
905 
906 	if (fd < 0) {
907 		perror("connect");
908 		exit(EXIT_FAILURE);
909 	}
910 
911 	control_expectln("SENDDONE");
912 
913 	/* Use invalid buffer here. */
914 	recv_buf(fd, NULL, sizeof(data), 0, -EFAULT);
915 
916 	if (stream) {
917 		/* For SOCK_STREAM we must continue reading. */
918 		expected_ret = sizeof(data);
919 	} else {
920 		/* For SOCK_SEQPACKET, the socket's queue must be empty. */
921 		expected_ret = -EAGAIN;
922 	}
923 
924 	recv_buf(fd, data, sizeof(data), MSG_DONTWAIT, expected_ret);
925 
926 	control_writeln("DONE");
927 
928 	close(fd);
929 }
930 
931 static void test_inv_buf_server(const struct test_opts *opts, bool stream)
932 {
933 	unsigned char data[INV_BUF_TEST_DATA_LEN] = {0};
934 	int fd;
935 
936 	if (stream)
937 		fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
938 	else
939 		fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
940 
941 	if (fd < 0) {
942 		perror("accept");
943 		exit(EXIT_FAILURE);
944 	}
945 
946 	send_buf(fd, data, sizeof(data), 0, sizeof(data));
947 
948 	control_writeln("SENDDONE");
949 
950 	control_expectln("DONE");
951 
952 	close(fd);
953 }
954 
955 static void test_stream_inv_buf_client(const struct test_opts *opts)
956 {
957 	test_inv_buf_client(opts, true);
958 }
959 
960 static void test_stream_inv_buf_server(const struct test_opts *opts)
961 {
962 	test_inv_buf_server(opts, true);
963 }
964 
965 static void test_seqpacket_inv_buf_client(const struct test_opts *opts)
966 {
967 	test_inv_buf_client(opts, false);
968 }
969 
970 static void test_seqpacket_inv_buf_server(const struct test_opts *opts)
971 {
972 	test_inv_buf_server(opts, false);
973 }
974 
975 #define HELLO_STR "HELLO"
976 #define WORLD_STR "WORLD"
977 
978 static void test_stream_virtio_skb_merge_client(const struct test_opts *opts)
979 {
980 	int fd;
981 
982 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
983 	if (fd < 0) {
984 		perror("connect");
985 		exit(EXIT_FAILURE);
986 	}
987 
988 	/* Send first skbuff. */
989 	send_buf(fd, HELLO_STR, strlen(HELLO_STR), 0, strlen(HELLO_STR));
990 
991 	control_writeln("SEND0");
992 	/* Peer reads part of first skbuff. */
993 	control_expectln("REPLY0");
994 
995 	/* Send the second skbuff; it will be appended to the first. */
996 	send_buf(fd, WORLD_STR, strlen(WORLD_STR), 0, strlen(WORLD_STR));
997 
998 	control_writeln("SEND1");
999 	/* Peer reads merged skbuff packet. */
1000 	control_expectln("REPLY1");
1001 
1002 	close(fd);
1003 }
1004 
1005 static void test_stream_virtio_skb_merge_server(const struct test_opts *opts)
1006 {
1007 	size_t read = 0, to_read;
1008 	unsigned char buf[64];
1009 	int fd;
1010 
1011 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1012 	if (fd < 0) {
1013 		perror("accept");
1014 		exit(EXIT_FAILURE);
1015 	}
1016 
1017 	control_expectln("SEND0");
1018 
1019 	/* Read skbuff partially. */
1020 	to_read = 2;
1021 	recv_buf(fd, buf + read, to_read, 0, to_read);
1022 	read += to_read;
1023 
1024 	control_writeln("REPLY0");
1025 	control_expectln("SEND1");
1026 
1027 	/* Read the rest of both buffers */
1028 	to_read = strlen(HELLO_STR WORLD_STR) - read;
1029 	recv_buf(fd, buf + read, to_read, 0, to_read);
1030 	read += to_read;
1031 
1032 	/* No more bytes should be there */
1033 	to_read = sizeof(buf) - read;
1034 	recv_buf(fd, buf + read, to_read, MSG_DONTWAIT, -EAGAIN);
1035 
1036 	if (memcmp(buf, HELLO_STR WORLD_STR, strlen(HELLO_STR WORLD_STR))) {
1037 		fprintf(stderr, "pattern mismatch\n");
1038 		exit(EXIT_FAILURE);
1039 	}
1040 
1041 	control_writeln("REPLY1");
1042 
1043 	close(fd);
1044 }
1045 
1046 static void test_seqpacket_msg_peek_client(const struct test_opts *opts)
1047 {
1048 	return test_msg_peek_client(opts, true);
1049 }
1050 
1051 static void test_seqpacket_msg_peek_server(const struct test_opts *opts)
1052 {
1053 	return test_msg_peek_server(opts, true);
1054 }
1055 
1056 static sig_atomic_t have_sigpipe;
1057 
1058 static void sigpipe(int signo)
1059 {
1060 	have_sigpipe = 1;
1061 }
1062 
1063 #define SEND_SLEEP_USEC (10 * 1000)
1064 
1065 static void test_stream_check_sigpipe(int fd)
1066 {
1067 	ssize_t res;
1068 
1069 	have_sigpipe = 0;
1070 
1071 	/* When the other peer calls shutdown(SHUT_RD), there is a chance that
1072 	 * the send() call could occur before the message carrying the close
1073 	 * information arrives over the transport. In such cases, the send()
1074 	 * might still succeed. To avoid this race, let's retry the send() call
1075 	 * a few times, ensuring the test is more reliable.
1076 	 */
1077 	timeout_begin(TIMEOUT);
1078 	while (1) {
1079 		res = send(fd, "A", 1, 0);
1080 		if (res == -1 && errno != EINTR)
1081 			break;
1082 
1083 		/* Sleep a little before trying again to avoid flooding the
1084 		 * other peer and filling its receive buffer, causing
1085 		 * a false negative.
1086 		 */
1087 		timeout_usleep(SEND_SLEEP_USEC);
1088 		timeout_check("send");
1089 	}
1090 	timeout_end();
1091 
1092 	if (errno != EPIPE) {
1093 		fprintf(stderr, "unexpected send(2) errno %d\n", errno);
1094 		exit(EXIT_FAILURE);
1095 	}
1096 	if (!have_sigpipe) {
1097 		fprintf(stderr, "SIGPIPE expected\n");
1098 		exit(EXIT_FAILURE);
1099 	}
1100 
1101 	have_sigpipe = 0;
1102 
1103 	timeout_begin(TIMEOUT);
1104 	while (1) {
1105 		res = send(fd, "A", 1, MSG_NOSIGNAL);
1106 		if (res == -1 && errno != EINTR)
1107 			break;
1108 
1109 		timeout_usleep(SEND_SLEEP_USEC);
1110 		timeout_check("send");
1111 	}
1112 	timeout_end();
1113 
1114 	if (errno != EPIPE) {
1115 		fprintf(stderr, "unexpected send(2) errno %d\n", errno);
1116 		exit(EXIT_FAILURE);
1117 	}
1118 	if (have_sigpipe) {
1119 		fprintf(stderr, "SIGPIPE not expected\n");
1120 		exit(EXIT_FAILURE);
1121 	}
1122 }
1123 
1124 static void test_stream_shutwr_client(const struct test_opts *opts)
1125 {
1126 	int fd;
1127 
1128 	struct sigaction act = {
1129 		.sa_handler = sigpipe,
1130 	};
1131 
1132 	sigaction(SIGPIPE, &act, NULL);
1133 
1134 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
1135 	if (fd < 0) {
1136 		perror("connect");
1137 		exit(EXIT_FAILURE);
1138 	}
1139 
1140 	if (shutdown(fd, SHUT_WR)) {
1141 		perror("shutdown");
1142 		exit(EXIT_FAILURE);
1143 	}
1144 
1145 	test_stream_check_sigpipe(fd);
1146 
1147 	control_writeln("CLIENTDONE");
1148 
1149 	close(fd);
1150 }
1151 
1152 static void test_stream_shutwr_server(const struct test_opts *opts)
1153 {
1154 	int fd;
1155 
1156 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1157 	if (fd < 0) {
1158 		perror("accept");
1159 		exit(EXIT_FAILURE);
1160 	}
1161 
1162 	control_expectln("CLIENTDONE");
1163 
1164 	close(fd);
1165 }
1166 
1167 static void test_stream_shutrd_client(const struct test_opts *opts)
1168 {
1169 	int fd;
1170 
1171 	struct sigaction act = {
1172 		.sa_handler = sigpipe,
1173 	};
1174 
1175 	sigaction(SIGPIPE, &act, NULL);
1176 
1177 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
1178 	if (fd < 0) {
1179 		perror("connect");
1180 		exit(EXIT_FAILURE);
1181 	}
1182 
1183 	control_expectln("SHUTRDDONE");
1184 
1185 	test_stream_check_sigpipe(fd);
1186 
1187 	control_writeln("CLIENTDONE");
1188 
1189 	close(fd);
1190 }
1191 
1192 static void test_stream_shutrd_server(const struct test_opts *opts)
1193 {
1194 	int fd;
1195 
1196 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1197 	if (fd < 0) {
1198 		perror("accept");
1199 		exit(EXIT_FAILURE);
1200 	}
1201 
1202 	if (shutdown(fd, SHUT_RD)) {
1203 		perror("shutdown");
1204 		exit(EXIT_FAILURE);
1205 	}
1206 
1207 	control_writeln("SHUTRDDONE");
1208 	control_expectln("CLIENTDONE");
1209 
1210 	close(fd);
1211 }
1212 
1213 static void test_double_bind_connect_server(const struct test_opts *opts)
1214 {
1215 	int listen_fd, client_fd, i;
1216 	struct sockaddr_vm sa_client;
1217 	socklen_t socklen_client = sizeof(sa_client);
1218 
1219 	listen_fd = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port);
1220 
1221 	for (i = 0; i < 2; i++) {
1222 		control_writeln("LISTENING");
1223 
1224 		timeout_begin(TIMEOUT);
1225 		do {
1226 			client_fd = accept(listen_fd, (struct sockaddr *)&sa_client,
1227 					   &socklen_client);
1228 			timeout_check("accept");
1229 		} while (client_fd < 0 && errno == EINTR);
1230 		timeout_end();
1231 
1232 		if (client_fd < 0) {
1233 			perror("accept");
1234 			exit(EXIT_FAILURE);
1235 		}
1236 
1237 		/* Waiting for remote peer to close connection */
1238 		vsock_wait_remote_close(client_fd);
1239 	}
1240 
1241 	close(listen_fd);
1242 }
1243 
1244 static void test_double_bind_connect_client(const struct test_opts *opts)
1245 {
1246 	int i, client_fd;
1247 
1248 	for (i = 0; i < 2; i++) {
1249 		/* Wait until server is ready to accept a new connection */
1250 		control_expectln("LISTENING");
1251 
1252 		/* We use 'peer_port + 1' as "some" port for the 'bind()'
1253 		 * call. It is safe with respect to overflow, but must be
1254 		 * kept in mind when running multiple test applications
1255 		 * simultaneously whose 'peer-port' arguments differ by 1.
1256 		 */
1257 		client_fd = vsock_bind_connect(opts->peer_cid, opts->peer_port,
1258 					       opts->peer_port + 1, SOCK_STREAM);
1259 
1260 		close(client_fd);
1261 	}
1262 }
1263 
1264 #define MSG_BUF_IOCTL_LEN 64
1265 static void test_unsent_bytes_server(const struct test_opts *opts, int type)
1266 {
1267 	unsigned char buf[MSG_BUF_IOCTL_LEN];
1268 	int client_fd;
1269 
1270 	client_fd = vsock_accept(VMADDR_CID_ANY, opts->peer_port, NULL, type);
1271 	if (client_fd < 0) {
1272 		perror("accept");
1273 		exit(EXIT_FAILURE);
1274 	}
1275 
1276 	recv_buf(client_fd, buf, sizeof(buf), 0, sizeof(buf));
1277 	control_writeln("RECEIVED");
1278 
1279 	close(client_fd);
1280 }
1281 
1282 static void test_unsent_bytes_client(const struct test_opts *opts, int type)
1283 {
1284 	unsigned char buf[MSG_BUF_IOCTL_LEN];
1285 	int fd;
1286 
1287 	fd = vsock_connect(opts->peer_cid, opts->peer_port, type);
1288 	if (fd < 0) {
1289 		perror("connect");
1290 		exit(EXIT_FAILURE);
1291 	}
1292 
1293 	for (int i = 0; i < sizeof(buf); i++)
1294 		buf[i] = rand() & 0xFF;
1295 
1296 	send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
1297 	control_expectln("RECEIVED");
1298 
1299 	/* SIOCOUTQ isn't guaranteed to instantly track sent data. Even though
1300 	 * the "RECEIVED" message means that the other side has received the
1301 	 * data, there can be a delay in our kernel before updating the "unsent
1302 	 * bytes" counter. vsock_wait_sent() will repeat SIOCOUTQ until it
1303 	 * returns 0.
1304 	 */
1305 	if (!vsock_wait_sent(fd))
1306 		fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n");
1307 
1308 	close(fd);
1309 }
1310 
1311 static void test_unread_bytes_server(const struct test_opts *opts, int type)
1312 {
1313 	unsigned char buf[MSG_BUF_IOCTL_LEN];
1314 	int client_fd;
1315 
1316 	client_fd = vsock_accept(VMADDR_CID_ANY, opts->peer_port, NULL, type);
1317 	if (client_fd < 0) {
1318 		perror("accept");
1319 		exit(EXIT_FAILURE);
1320 	}
1321 
1322 	for (int i = 0; i < sizeof(buf); i++)
1323 		buf[i] = rand() & 0xFF;
1324 
1325 	send_buf(client_fd, buf, sizeof(buf), 0, sizeof(buf));
1326 	control_writeln("SENT");
1327 
1328 	close(client_fd);
1329 }
1330 
1331 static void test_unread_bytes_client(const struct test_opts *opts, int type)
1332 {
1333 	unsigned char buf[MSG_BUF_IOCTL_LEN];
1334 	int fd;
1335 
1336 	fd = vsock_connect(opts->peer_cid, opts->peer_port, type);
1337 	if (fd < 0) {
1338 		perror("connect");
1339 		exit(EXIT_FAILURE);
1340 	}
1341 
1342 	control_expectln("SENT");
1343 	/* The data has arrived but has not been read. The expected value
1344 	 * is MSG_BUF_IOCTL_LEN.
1345 	 */
1346 	if (!vsock_ioctl_int(fd, SIOCINQ, MSG_BUF_IOCTL_LEN)) {
1347 		fprintf(stderr, "Test skipped, SIOCINQ not supported.\n");
1348 		goto out;
1349 	}
1350 
1351 	recv_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
1352 	/* All data has been consumed, so the expected is 0. */
1353 	vsock_ioctl_int(fd, SIOCINQ, 0);
1354 
1355 out:
1356 	close(fd);
1357 }
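
/* vsock_wait_sent() and vsock_ioctl_int() are util.h helpers; the sketch
 * below shows the ioctls they are assumed to wrap. SIOCOUTQ reports the
 * number of bytes not yet sent by the transport, SIOCINQ the number of bytes
 * received but not yet read:
 *
 *   int unsent, unread;
 *
 *   if (ioctl(fd, SIOCOUTQ, &unsent))
 *           perror("ioctl(SIOCOUTQ)");   // may be unsupported by the transport
 *   if (ioctl(fd, SIOCINQ, &unread))
 *           perror("ioctl(SIOCINQ)");
 *
 * The tests above poll SIOCOUTQ until it drops to 0, and expect SIOCINQ to be
 * MSG_BUF_IOCTL_LEN before the data is consumed and 0 afterwards.
 */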
1358 
1359 static void test_stream_unsent_bytes_client(const struct test_opts *opts)
1360 {
1361 	test_unsent_bytes_client(opts, SOCK_STREAM);
1362 }
1363 
1364 static void test_stream_unsent_bytes_server(const struct test_opts *opts)
1365 {
1366 	test_unsent_bytes_server(opts, SOCK_STREAM);
1367 }
1368 
1369 static void test_seqpacket_unsent_bytes_client(const struct test_opts *opts)
1370 {
1371 	test_unsent_bytes_client(opts, SOCK_SEQPACKET);
1372 }
1373 
1374 static void test_seqpacket_unsent_bytes_server(const struct test_opts *opts)
1375 {
1376 	test_unsent_bytes_server(opts, SOCK_SEQPACKET);
1377 }
1378 
1379 static void test_stream_unread_bytes_client(const struct test_opts *opts)
1380 {
1381 	test_unread_bytes_client(opts, SOCK_STREAM);
1382 }
1383 
1384 static void test_stream_unread_bytes_server(const struct test_opts *opts)
1385 {
1386 	test_unread_bytes_server(opts, SOCK_STREAM);
1387 }
1388 
1389 static void test_seqpacket_unread_bytes_client(const struct test_opts *opts)
1390 {
1391 	test_unread_bytes_client(opts, SOCK_SEQPACKET);
1392 }
1393 
1394 static void test_seqpacket_unread_bytes_server(const struct test_opts *opts)
1395 {
1396 	test_unread_bytes_server(opts, SOCK_SEQPACKET);
1397 }
1398 
1399 #define RCVLOWAT_CREDIT_UPD_BUF_SIZE	(1024 * 128)
1400 /* This define is the same as in 'include/linux/virtio_vsock.h':
1401  * it is used to decide when to send a credit update message while
1402  * reading from a socket's rx queue. Its value and the way the
1403  * kernel uses it are important for this test.
1404  */
1405 #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE	(1024 * 64)
1406 
1407 static void test_stream_rcvlowat_def_cred_upd_client(const struct test_opts *opts)
1408 {
1409 	size_t buf_size;
1410 	void *buf;
1411 	int fd;
1412 
1413 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
1414 	if (fd < 0) {
1415 		perror("connect");
1416 		exit(EXIT_FAILURE);
1417 	}
1418 
1419 	/* Send 1 byte more than peer's buffer size. */
1420 	buf_size = RCVLOWAT_CREDIT_UPD_BUF_SIZE + 1;
1421 
1422 	buf = malloc(buf_size);
1423 	if (!buf) {
1424 		perror("malloc");
1425 		exit(EXIT_FAILURE);
1426 	}
1427 
1428 	/* Wait until the peer sets the needed buffer size. */
1429 	recv_byte(fd, 1, 0);
1430 
1431 	if (send(fd, buf, buf_size, 0) != buf_size) {
1432 		perror("send failed");
1433 		exit(EXIT_FAILURE);
1434 	}
1435 
1436 	free(buf);
1437 	close(fd);
1438 }
1439 
1440 static void test_stream_credit_update_test(const struct test_opts *opts,
1441 					   bool low_rx_bytes_test)
1442 {
1443 	int recv_buf_size;
1444 	struct pollfd fds;
1445 	size_t buf_size;
1446 	unsigned long long sock_buf_size;
1447 	void *buf;
1448 	int fd;
1449 
1450 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1451 	if (fd < 0) {
1452 		perror("accept");
1453 		exit(EXIT_FAILURE);
1454 	}
1455 
1456 	buf_size = RCVLOWAT_CREDIT_UPD_BUF_SIZE;
1457 
1458 	/* size_t can be < unsigned long long */
1459 	sock_buf_size = buf_size;
1460 
1461 	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
1462 			     sock_buf_size,
1463 			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
1464 
1465 	if (low_rx_bytes_test) {
1466 		/* Set a new SO_RCVLOWAT here. This enables sending a credit
1467 		 * update when the number of bytes in our rx queue becomes <
1468 		 * the SO_RCVLOWAT value.
1469 		 */
1470 		recv_buf_size = 1 + VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;
1471 
1472 		setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT,
1473 				     recv_buf_size, "setsockopt(SO_RCVLOWAT)");
1474 	}
1475 
1476 	/* Send one dummy byte here, because the 'setsockopt()' above also
1477 	 * sends a special packet which tells the sender to update our buffer
1478 	 * size. This 'send_byte()' serializes that packet with the data
1479 	 * reads in the loop below. The sender starts transmission only when
1480 	 * it receives this single byte.
1481 	 */
1482 	send_byte(fd, 1, 0);
1483 
1484 	buf = malloc(buf_size);
1485 	if (!buf) {
1486 		perror("malloc");
1487 		exit(EXIT_FAILURE);
1488 	}
1489 
1490 	/* Wait until there is 128KB of data in the rx queue. */
1491 	while (1) {
1492 		ssize_t res;
1493 
1494 		res = recv(fd, buf, buf_size, MSG_PEEK);
1495 		if (res == buf_size)
1496 			break;
1497 
1498 		if (res <= 0) {
1499 			fprintf(stderr, "unexpected 'recv()' return: %zi\n", res);
1500 			exit(EXIT_FAILURE);
1501 		}
1502 	}
1503 
1504 	/* There is 128KB of data in the socket's rx queue; dequeue the first
1505 	 * 64KB. A credit update is sent if 'low_rx_bytes_test' == true;
1506 	 * otherwise, it is sent in the 'if (!low_rx_bytes_test)' branch below.
1507 	 */
1508 	recv_buf_size = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;
1509 	recv_buf(fd, buf, recv_buf_size, 0, recv_buf_size);
1510 
1511 	if (!low_rx_bytes_test) {
1512 		recv_buf_size++;
1513 
1514 		/* Updating SO_RCVLOWAT will send credit update. */
1515 		setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT,
1516 				     recv_buf_size, "setsockopt(SO_RCVLOWAT)");
1517 	}
1518 
1519 	fds.fd = fd;
1520 	fds.events = POLLIN | POLLRDNORM | POLLERR |
1521 		     POLLRDHUP | POLLHUP;
1522 
1523 	/* This 'poll()' will return once we receive the last byte
1524 	 * sent by the client.
1525 	 */
1526 	if (poll(&fds, 1, -1) < 0) {
1527 		perror("poll");
1528 		exit(EXIT_FAILURE);
1529 	}
1530 
1531 	if (fds.revents & POLLERR) {
1532 		fprintf(stderr, "'poll()' error\n");
1533 		exit(EXIT_FAILURE);
1534 	}
1535 
1536 	if (fds.revents & (POLLIN | POLLRDNORM)) {
1537 		recv_buf(fd, buf, recv_buf_size, MSG_DONTWAIT, recv_buf_size);
1538 	} else {
1539 		/* These flags must be set, as there is at
1540 		 * least 64KB of data ready to read.
1541 		 */
1542 		fprintf(stderr, "POLLIN | POLLRDNORM expected\n");
1543 		exit(EXIT_FAILURE);
1544 	}
1545 
1546 	free(buf);
1547 	close(fd);
1548 }
1549 
1550 static void test_stream_cred_upd_on_low_rx_bytes(const struct test_opts *opts)
1551 {
1552 	test_stream_credit_update_test(opts, true);
1553 }
1554 
1555 static void test_stream_cred_upd_on_set_rcvlowat(const struct test_opts *opts)
1556 {
1557 	test_stream_credit_update_test(opts, false);
1558 }
1559 
1560 /* The goal of test leak_acceptq is to stress the race between connect() and
1561  * close(listener). Implementation of client/server loops boils down to:
1562  *
1563  * client                server
1564  * ------                ------
1565  * write(CONTINUE)
1566  *                       expect(CONTINUE)
1567  *                       listen()
1568  *                       write(LISTENING)
1569  * expect(LISTENING)
1570  * connect()             close()
1571  */
1572 #define ACCEPTQ_LEAK_RACE_TIMEOUT 2 /* seconds */
1573 
1574 static void test_stream_leak_acceptq_client(const struct test_opts *opts)
1575 {
1576 	time_t tout;
1577 	int fd;
1578 
1579 	tout = current_nsec() + ACCEPTQ_LEAK_RACE_TIMEOUT * NSEC_PER_SEC;
1580 	do {
1581 		control_writeulong(CONTROL_CONTINUE);
1582 
1583 		fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
1584 		if (fd >= 0)
1585 			close(fd);
1586 	} while (current_nsec() < tout);
1587 
1588 	control_writeulong(CONTROL_DONE);
1589 }
1590 
1591 /* Test for a memory leak. User is expected to run kmemleak scan, see README. */
1592 static void test_stream_leak_acceptq_server(const struct test_opts *opts)
1593 {
1594 	int fd;
1595 
1596 	while (control_readulong() == CONTROL_CONTINUE) {
1597 		fd = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port);
1598 		control_writeln("LISTENING");
1599 		close(fd);
1600 	}
1601 }
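
/* As noted above, a leak here is only visible to kmemleak. Assuming a kernel
 * built with CONFIG_DEBUG_KMEMLEAK and debugfs mounted, the usual scan is:
 *
 *   # echo scan > /sys/kernel/debug/kmemleak
 *   # cat /sys/kernel/debug/kmemleak
 *
 * See the selftest README and Documentation/dev-tools/kmemleak.rst for
 * details.
 */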
1602 
1603 /* Test for a memory leak. User is expected to run kmemleak scan, see README. */
1604 static void test_stream_msgzcopy_leak_errq_client(const struct test_opts *opts)
1605 {
1606 	struct pollfd fds = { 0 };
1607 	int fd;
1608 
1609 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
1610 	if (fd < 0) {
1611 		perror("connect");
1612 		exit(EXIT_FAILURE);
1613 	}
1614 
1615 	enable_so_zerocopy_check(fd);
1616 	send_byte(fd, 1, MSG_ZEROCOPY);
1617 
1618 	fds.fd = fd;
1619 	fds.events = 0;
1620 	if (poll(&fds, 1, -1) < 0) {
1621 		perror("poll");
1622 		exit(EXIT_FAILURE);
1623 	}
1624 
1625 	close(fd);
1626 }
1627 
1628 static void test_stream_msgzcopy_leak_errq_server(const struct test_opts *opts)
1629 {
1630 	int fd;
1631 
1632 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1633 	if (fd < 0) {
1634 		perror("accept");
1635 		exit(EXIT_FAILURE);
1636 	}
1637 
1638 	recv_byte(fd, 1, 0);
1639 	vsock_wait_remote_close(fd);
1640 	close(fd);
1641 }
1642 
1643 /* Test msgzcopy_leak_zcskb is meant to exercise sendmsg() error handling path,
1644  * that might leak an skb. The idea is to fail virtio_transport_init_zcopy_skb()
1645  * by hitting net.core.optmem_max limit in sock_omalloc(), specifically
1646  *
1647  *   vsock_connectible_sendmsg
1648  *     virtio_transport_stream_enqueue
1649  *       virtio_transport_send_pkt_info
1650  *         virtio_transport_init_zcopy_skb
1651  *         . msg_zerocopy_realloc
1652  *         .   msg_zerocopy_alloc
1653  *         .     sock_omalloc
1654  *         .       sk_omem_alloc + size > sysctl_optmem_max
1655  *         return -ENOMEM
1656  *
1657  * We abuse the implementation detail of net/socket.c:____sys_sendmsg().
1658  * sk_omem_alloc can be precisely bumped by sock_kmalloc(), as it is used to
1659  * fetch user-provided control data.
1660  *
1661  * While this approach works for now, it relies on assumptions regarding the
1662  * implementation and configuration (for example, the order of net.core.optmem_max
1663  * cannot exceed MAX_PAGE_ORDER), which may not hold in the future. More
1664  * resilient testing could be implemented by leveraging the Fault injection
1665  * framework (CONFIG_FAULT_INJECTION), e.g.
1666  *
1667  *   client# echo N > /sys/kernel/debug/failslab/ignore-gfp-wait
1668  *   client# echo 0 > /sys/kernel/debug/failslab/verbose
1669  *
1670  *   void client(const struct test_opts *opts)
1671  *   {
1672  *       char buf[16];
1673  *       int f, s, i;
1674  *
1675  *       f = open("/proc/self/fail-nth", O_WRONLY);
1676  *
1677  *       for (i = 1; i < 32; i++) {
1678  *           control_writeulong(CONTROL_CONTINUE);
1679  *
1680  *           s = vsock_stream_connect(opts->peer_cid, opts->peer_port);
1681  *           enable_so_zerocopy_check(s);
1682  *
1683  *           sprintf(buf, "%d", i);
1684  *           write(f, buf, strlen(buf));
1685  *
1686  *           send(s, &(char){ 0 }, 1, MSG_ZEROCOPY);
1687  *
1688  *           write(f, "0", 1);
1689  *           close(s);
1690  *       }
1691  *
1692  *       control_writeulong(CONTROL_DONE);
1693  *       close(f);
1694  *   }
1695  *
1696  *   void server(const struct test_opts *opts)
1697  *   {
1698  *       int fd;
1699  *
1700  *       while (control_readulong() == CONTROL_CONTINUE) {
1701  *           fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1702  *           vsock_wait_remote_close(fd);
1703  *           close(fd);
1704  *       }
1705  *   }
1706  *
1707  * Refer to Documentation/fault-injection/fault-injection.rst.
1708  */
1709 #define MAX_PAGE_ORDER	10	/* usually */
1710 #define PAGE_SIZE	4096
1711 
1712 /* Test for a memory leak. User is expected to run kmemleak scan, see README. */
1713 static void test_stream_msgzcopy_leak_zcskb_client(const struct test_opts *opts)
1714 {
1715 	size_t optmem_max, ctl_len, chunk_size;
1716 	struct msghdr msg = { 0 };
1717 	struct iovec iov;
1718 	char *chunk;
1719 	int fd, res;
1720 	FILE *f;
1721 
1722 	f = fopen("/proc/sys/net/core/optmem_max", "r");
1723 	if (!f) {
1724 		perror("fopen(optmem_max)");
1725 		exit(EXIT_FAILURE);
1726 	}
1727 
1728 	if (fscanf(f, "%zu", &optmem_max) != 1) {
1729 		fprintf(stderr, "fscanf(optmem_max) failed\n");
1730 		exit(EXIT_FAILURE);
1731 	}
1732 
1733 	fclose(f);
1734 
1735 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
1736 	if (fd < 0) {
1737 		perror("connect");
1738 		exit(EXIT_FAILURE);
1739 	}
1740 
1741 	enable_so_zerocopy_check(fd);
1742 
1743 	ctl_len = optmem_max - 1;
1744 	if (ctl_len > PAGE_SIZE << MAX_PAGE_ORDER) {
1745 		fprintf(stderr, "Try with net.core.optmem_max = 100000\n");
1746 		exit(EXIT_FAILURE);
1747 	}
1748 
1749 	chunk_size = CMSG_SPACE(ctl_len);
1750 	chunk = malloc(chunk_size);
1751 	if (!chunk) {
1752 		perror("malloc");
1753 		exit(EXIT_FAILURE);
1754 	}
1755 	memset(chunk, 0, chunk_size);
1756 
1757 	iov.iov_base = &(char){ 0 };
1758 	iov.iov_len = 1;
1759 
1760 	msg.msg_iov = &iov;
1761 	msg.msg_iovlen = 1;
1762 	msg.msg_control = chunk;
1763 	msg.msg_controllen = ctl_len;
1764 
1765 	errno = 0;
1766 	res = sendmsg(fd, &msg, MSG_ZEROCOPY);
1767 	if (res >= 0 || errno != ENOMEM) {
1768 		fprintf(stderr, "Expected ENOMEM, got errno=%d res=%d\n",
1769 			errno, res);
1770 		exit(EXIT_FAILURE);
1771 	}
1772 
1773 	close(fd);
1774 }
1775 
1776 static void test_stream_msgzcopy_leak_zcskb_server(const struct test_opts *opts)
1777 {
1778 	int fd;
1779 
1780 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1781 	if (fd < 0) {
1782 		perror("accept");
1783 		exit(EXIT_FAILURE);
1784 	}
1785 
1786 	vsock_wait_remote_close(fd);
1787 	close(fd);
1788 }
1789 
1790 #define MAX_PORT_RETRIES	24	/* net/vmw_vsock/af_vsock.c */
1791 
1792 static bool test_stream_transport_uaf(int cid)
1793 {
1794 	int sockets[MAX_PORT_RETRIES];
1795 	struct sockaddr_vm addr;
1796 	socklen_t alen;
1797 	int fd, i, c;
1798 	bool ret;
1799 
1800 	/* Probe for a transport by attempting a local CID bind. Unavailable
1801 	 * transport (or more specifically: an unsupported transport/CID
1802 	 * combination) results in EADDRNOTAVAIL, other errnos are fatal.
1803 	 */
1804 	fd = vsock_bind_try(cid, VMADDR_PORT_ANY, SOCK_STREAM);
1805 	if (fd < 0) {
1806 		if (errno != EADDRNOTAVAIL) {
1807 			perror("Unexpected bind() errno");
1808 			exit(EXIT_FAILURE);
1809 		}
1810 
1811 		return false;
1812 	}
1813 
1814 	alen = sizeof(addr);
1815 	if (getsockname(fd, (struct sockaddr *)&addr, &alen)) {
1816 		perror("getsockname");
1817 		exit(EXIT_FAILURE);
1818 	}
1819 
1820 	/* Drain the autobind pool; see __vsock_bind_connectible(). */
1821 	for (i = 0; i < MAX_PORT_RETRIES; ++i)
1822 		sockets[i] = vsock_bind(cid, ++addr.svm_port, SOCK_STREAM);
1823 
1824 	close(fd);
1825 
1826 	/* Setting SOCK_NONBLOCK makes connect() return soon after
1827 	 * (re-)assigning the transport. We are not connecting to anything
1828 	 * anyway, so there is no point entering the main loop in
1829 	 * vsock_connect(); waiting for timeout, checking for signals, etc.
1830 	 */
1831 	fd = socket(AF_VSOCK, SOCK_STREAM | SOCK_NONBLOCK, 0);
1832 	if (fd < 0) {
1833 		perror("socket");
1834 		exit(EXIT_FAILURE);
1835 	}
1836 
1837 	/* Assign transport, while failing to autobind. Autobind pool was
1838 	 * drained, so EADDRNOTAVAIL coming from __vsock_bind_connectible() is
1839 	 * expected.
1840 	 *
1841 	 * One exception is ENODEV which is thrown by vsock_assign_transport(),
1842 	 * i.e. before vsock_auto_bind(), when the only transport loaded is
1843 	 * vhost.
1844 	 */
1845 	if (!connect(fd, (struct sockaddr *)&addr, alen)) {
1846 		fprintf(stderr, "Unexpected connect() success\n");
1847 		exit(EXIT_FAILURE);
1848 	}
1849 	if (errno == ENODEV && cid == VMADDR_CID_HOST) {
1850 		ret = false;
1851 		goto cleanup;
1852 	}
1853 	if (errno != EADDRNOTAVAIL) {
1854 		perror("Unexpected connect() errno");
1855 		exit(EXIT_FAILURE);
1856 	}
1857 
1858 	/* Reassign transport, triggering old transport release and
1859 	 * (potentially) unbinding of an unbound socket.
1860 	 *
1861 	 * Vulnerable system may crash now.
1862 	 */
1863 	for (c = VMADDR_CID_HYPERVISOR; c <= VMADDR_CID_HOST + 1; ++c) {
1864 		if (c != cid) {
1865 			addr.svm_cid = c;
1866 			(void)connect(fd, (struct sockaddr *)&addr, alen);
1867 		}
1868 	}
1869 
1870 	ret = true;
1871 cleanup:
1872 	close(fd);
1873 	while (i--)
1874 		close(sockets[i]);
1875 
1876 	return ret;
1877 }
1878 
1879 /* Test attempts to trigger a transport release for an unbound socket. This can
1880  * lead to a reference count mishandling.
1881  */
1882 static void test_stream_transport_uaf_client(const struct test_opts *opts)
1883 {
1884 	bool tested = false;
1885 	int cid, tr;
1886 
1887 	for (cid = VMADDR_CID_HYPERVISOR; cid <= VMADDR_CID_HOST + 1; ++cid)
1888 		tested |= test_stream_transport_uaf(cid);
1889 
1890 	tr = get_transports();
1891 	if (!tr)
1892 		fprintf(stderr, "No transports detected\n");
1893 	else if (tr == TRANSPORT_VIRTIO)
1894 		fprintf(stderr, "Setup unsupported: sole virtio transport\n");
1895 	else if (!tested)
1896 		fprintf(stderr, "No transports tested\n");
1897 }
1898 
1899 static void test_stream_connect_retry_client(const struct test_opts *opts)
1900 {
1901 	int fd;
1902 
1903 	fd = socket(AF_VSOCK, SOCK_STREAM, 0);
1904 	if (fd < 0) {
1905 		perror("socket");
1906 		exit(EXIT_FAILURE);
1907 	}
1908 
1909 	if (!vsock_connect_fd(fd, opts->peer_cid, opts->peer_port)) {
1910 		fprintf(stderr, "Unexpected connect() #1 success\n");
1911 		exit(EXIT_FAILURE);
1912 	}
1913 
1914 	control_writeln("LISTEN");
1915 	control_expectln("LISTENING");
1916 
1917 	if (vsock_connect_fd(fd, opts->peer_cid, opts->peer_port)) {
1918 		perror("connect() #2");
1919 		exit(EXIT_FAILURE);
1920 	}
1921 
1922 	close(fd);
1923 }
1924 
1925 static void test_stream_connect_retry_server(const struct test_opts *opts)
1926 {
1927 	int fd;
1928 
1929 	control_expectln("LISTEN");
1930 
1931 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1932 	if (fd < 0) {
1933 		perror("accept");
1934 		exit(EXIT_FAILURE);
1935 	}
1936 
1937 	vsock_wait_remote_close(fd);
1938 	close(fd);
1939 }
1940 
1941 #define TRANSPORT_CHANGE_TIMEOUT 2 /* seconds */
1942 
1943 static void *test_stream_transport_change_thread(void *vargp)
1944 {
1945 	pid_t *pid = (pid_t *)vargp;
1946 	int ret;
1947 
1948 	ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
1949 	if (ret) {
1950 		fprintf(stderr, "pthread_setcanceltype: %d\n", ret);
1951 		exit(EXIT_FAILURE);
1952 	}
1953 
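	/* Flood the test process with SIGUSR1 so its connect() calls keep
	 * getting interrupted until this thread is cancelled.
	 */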
1954 	while (true) {
1955 		if (kill(*pid, SIGUSR1) < 0) {
1956 			perror("kill");
1957 			exit(EXIT_FAILURE);
1958 		}
1959 	}
1960 	return NULL;
1961 }
1962 
1963 static void test_transport_change_signal_handler(int signal)
1964 {
1965 	/* We need a custom handler for SIGUSR1 as the default one terminates the process. */
1966 }
1967 
1968 static void test_stream_transport_change_client(const struct test_opts *opts)
1969 {
1970 	__sighandler_t old_handler;
1971 	pid_t pid = getpid();
1972 	pthread_t thread_id;
1973 	time_t tout;
1974 	int ret, tr;
1975 
1976 	tr = get_transports();
1977 
1978 	/* Print a warning if there is a G2H transport loaded.
1979 	 * This is on a best-effort basis because VMCI can be either G2H or H2G, and there is
1980 	 * no easy way to tell which.
1981 	 * The bug we are testing only appears when G2H transports are not loaded.
1982 	 * This is because `vsock_assign_transport`, when using CID 0, assigns a G2H transport
1983 	 * to vsk->transport. If none is available it is set to NULL, causing the null-ptr-deref.
1984 	 */
1985 	if (tr & TRANSPORTS_G2H)
1986 		fprintf(stderr, "G2H Transport detected. This test will not fail.\n");
1987 
1988 	old_handler = signal(SIGUSR1, test_transport_change_signal_handler);
1989 	if (old_handler == SIG_ERR) {
1990 		perror("signal");
1991 		exit(EXIT_FAILURE);
1992 	}
1993 
1994 	ret = pthread_create(&thread_id, NULL, test_stream_transport_change_thread, &pid);
1995 	if (ret) {
1996 		fprintf(stderr, "pthread_create: %d\n", ret);
1997 		exit(EXIT_FAILURE);
1998 	}
1999 
2000 	control_expectln("LISTENING");
2001 
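	/* Keep racing connect() against the signal storm for
	 * TRANSPORT_CHANGE_TIMEOUT seconds: an interrupted connect() followed
	 * by a connect() to CID 0 is the sequence that triggers the
	 * transport-change null-ptr-deref on a vulnerable kernel.
	 */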
2002 	tout = current_nsec() + TRANSPORT_CHANGE_TIMEOUT * NSEC_PER_SEC;
2003 	do {
2004 		struct sockaddr_vm sa = {
2005 			.svm_family = AF_VSOCK,
2006 			.svm_cid = opts->peer_cid,
2007 			.svm_port = opts->peer_port,
2008 		};
2009 		bool send_control = false;
2010 		int s;
2011 
2012 		s = socket(AF_VSOCK, SOCK_STREAM, 0);
2013 		if (s < 0) {
2014 			perror("socket");
2015 			exit(EXIT_FAILURE);
2016 		}
2017 
2018 		ret = connect(s, (struct sockaddr *)&sa, sizeof(sa));
2019 		/* The connect() can fail due to signals coming from the thread,
2020 		 * or because the receiver's connection queue is full.
2021 		 * We also ignore the latter case because there is no way of
2022 		 * synchronizing the client's connect() and the server's accept()
2023 		 * while connects are constantly being interrupted by signals.
2024 		 */
2025 		if (ret == -1 && (errno != EINTR && errno != ECONNRESET)) {
2026 			perror("connect");
2027 			exit(EXIT_FAILURE);
2028 		}
2029 
2030 		/* Notify the server if the connect() is successful or the
2031 		 * receiver connection queue is full, so it will do accept()
2032 		 * to drain it.
2033 		 */
2034 		if (!ret || errno == ECONNRESET)
2035 			send_control = true;
2036 
2037 		/* Setting the CID to 0 causes a transport change. */
2038 		sa.svm_cid = 0;
2039 
2040 		/* There is a case where this will not fail:
2041 		 * if the previous connect() was interrupted after the
2042 		 * connection request had already been sent, this second
2043 		 * connect() will simply wait for the response.
2044 		 */
2045 		ret = connect(s, (struct sockaddr *)&sa, sizeof(sa));
2046 		if (!ret || errno == ECONNRESET)
2047 			send_control = true;
2048 
2049 		close(s);
2050 
2051 		if (send_control)
2052 			control_writeulong(CONTROL_CONTINUE);
2053 
2054 	} while (current_nsec() < tout);
2055 
2056 	control_writeulong(CONTROL_DONE);
2057 
2058 	ret = pthread_cancel(thread_id);
2059 	if (ret) {
2060 		fprintf(stderr, "pthread_cancel: %d\n", ret);
2061 		exit(EXIT_FAILURE);
2062 	}
2063 
2064 	ret = pthread_join(thread_id, NULL);
2065 	if (ret) {
2066 		fprintf(stderr, "pthread_join: %d\n", ret);
2067 		exit(EXIT_FAILURE);
2068 	}
2069 
2070 	if (signal(SIGUSR1, old_handler) == SIG_ERR) {
2071 		perror("signal");
2072 		exit(EXIT_FAILURE);
2073 	}
2074 }
2075 
2076 static void test_stream_transport_change_server(const struct test_opts *opts)
2077 {
2078 	int s = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port);
2079 
2080 	/* Make the socket nonblocking because connects that have been interrupted
2081 	 * (EINTR) can fill the receiver's accept queue anyway, leading to connect failure.
2082 	 * As of today (6.15), in such a situation there is no way to tell, from the
2083 	 * client side, whether the connection has been queued on the server or not.
2084 	 */
2085 	if (fcntl(s, F_SETFL, fcntl(s, F_GETFL, 0) | O_NONBLOCK) < 0) {
2086 		perror("fcntl");
2087 		exit(EXIT_FAILURE);
2088 	}
2089 	control_writeln("LISTENING");
2090 
2091 	while (control_readulong() == CONTROL_CONTINUE) {
2092 		/* We must accept the connections, otherwise the `listen`
2093 		 * queue will fill up and new connections will fail.
2094 		 * There can be more than one queued connection;
2095 		 * drain them all.
2096 		 */
2097 		while (true) {
2098 			int client = accept(s, NULL, NULL);
2099 
2100 			if (client < 0) {
2101 				if (errno == EAGAIN)
2102 					break;
2103 
2104 				perror("accept");
2105 				exit(EXIT_FAILURE);
2106 			}
2107 
2108 			close(client);
2109 		}
2110 	}
2111 
2112 	close(s);
2113 }
2114 
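/* Regression test named after the null-ptr-deref that close() with SO_LINGER
 * enabled used to trigger (see "SOCK_STREAM SO_LINGER null-ptr-deref" in
 * test_cases): connect, enable lingering and close.
 *
 * enable_so_linger() comes from the shared test utilities; a minimal sketch
 * of what such a helper is assumed to do (the real one may differ):
 *
 *	struct linger lng = { .l_onoff = 1, .l_linger = timeout };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lng, sizeof(lng)) < 0) {
 *		perror("setsockopt(SO_LINGER)");
 *		exit(EXIT_FAILURE);
 *	}
 */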
2115 static void test_stream_linger_client(const struct test_opts *opts)
2116 {
2117 	int fd;
2118 
2119 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
2120 	if (fd < 0) {
2121 		perror("connect");
2122 		exit(EXIT_FAILURE);
2123 	}
2124 
2125 	enable_so_linger(fd, 1);
2126 	close(fd);
2127 }
2128 
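/* Peer for the SO_LINGER null-ptr-deref test: simply accept the connection
 * and wait for the client to close it.
 */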
2129 static void test_stream_linger_server(const struct test_opts *opts)
2130 {
2131 	int fd;
2132 
2133 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
2134 	if (fd < 0) {
2135 		perror("accept");
2136 		exit(EXIT_FAILURE);
2137 	}
2138 
2139 	vsock_wait_remote_close(fd);
2140 	close(fd);
2141 }
2142 
2143 /* Half of the default timeout, so we do not risk timing out the control channel */
2144 #define LINGER_TIMEOUT	(TIMEOUT / 2)
2145 
2146 static void test_stream_nolinger_client(const struct test_opts *opts)
2147 {
2148 	bool waited;
2149 	time_t ns;
2150 	int fd;
2151 
2152 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
2153 	if (fd < 0) {
2154 		perror("connect");
2155 		exit(EXIT_FAILURE);
2156 	}
2157 
2158 	enable_so_linger(fd, LINGER_TIMEOUT);
2159 	send_byte(fd, 1, 0); /* Left unread to expose incorrect behaviour. */
2160 	waited = vsock_wait_sent(fd);
2161 
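	/* vsock_wait_sent() is assumed to poll ioctl(SIOCOUTQ) until the send
	 * queue is empty, returning false if the ioctl is unsupported (hence
	 * the skip below). Once everything has been transmitted, close() must
	 * not block for the full SO_LINGER timeout just because the peer has
	 * not read the data.
	 */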
2162 	ns = current_nsec();
2163 	close(fd);
2164 	ns = current_nsec() - ns;
2165 
2166 	if (!waited) {
2167 		fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n");
2168 	} else if (DIV_ROUND_UP(ns, NSEC_PER_SEC) >= LINGER_TIMEOUT) {
2169 		fprintf(stderr, "Unexpected lingering\n");
2170 		exit(EXIT_FAILURE);
2171 	}
2172 
2173 	control_writeln("DONE");
2174 }
2175 
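/* Peer for the "close() on unread" test: accept the connection but
 * deliberately never read the byte the client sent, so the data stays unread
 * on this side while the client close()s, then wait for DONE.
 */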
2176 static void test_stream_nolinger_server(const struct test_opts *opts)
2177 {
2178 	int fd;
2179 
2180 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
2181 	if (fd < 0) {
2182 		perror("accept");
2183 		exit(EXIT_FAILURE);
2184 	}
2185 
2186 	control_expectln("DONE");
2187 	close(fd);
2188 }
2189 
2190 static struct test_case test_cases[] = {
2191 	{
2192 		.name = "SOCK_STREAM connection reset",
2193 		.run_client = test_stream_connection_reset,
2194 	},
2195 	{
2196 		.name = "SOCK_STREAM bind only",
2197 		.run_client = test_stream_bind_only_client,
2198 		.run_server = test_stream_bind_only_server,
2199 	},
2200 	{
2201 		.name = "SOCK_STREAM client close",
2202 		.run_client = test_stream_client_close_client,
2203 		.run_server = test_stream_client_close_server,
2204 	},
2205 	{
2206 		.name = "SOCK_STREAM server close",
2207 		.run_client = test_stream_server_close_client,
2208 		.run_server = test_stream_server_close_server,
2209 	},
2210 	{
2211 		.name = "SOCK_STREAM multiple connections",
2212 		.run_client = test_stream_multiconn_client,
2213 		.run_server = test_stream_multiconn_server,
2214 	},
2215 	{
2216 		.name = "SOCK_STREAM MSG_PEEK",
2217 		.run_client = test_stream_msg_peek_client,
2218 		.run_server = test_stream_msg_peek_server,
2219 	},
2220 	{
2221 		.name = "SOCK_SEQPACKET msg bounds",
2222 		.run_client = test_seqpacket_msg_bounds_client,
2223 		.run_server = test_seqpacket_msg_bounds_server,
2224 	},
2225 	{
2226 		.name = "SOCK_SEQPACKET MSG_TRUNC flag",
2227 		.run_client = test_seqpacket_msg_trunc_client,
2228 		.run_server = test_seqpacket_msg_trunc_server,
2229 	},
2230 	{
2231 		.name = "SOCK_SEQPACKET timeout",
2232 		.run_client = test_seqpacket_timeout_client,
2233 		.run_server = test_seqpacket_timeout_server,
2234 	},
2235 	{
2236 		.name = "SOCK_SEQPACKET invalid receive buffer",
2237 		.run_client = test_seqpacket_invalid_rec_buffer_client,
2238 		.run_server = test_seqpacket_invalid_rec_buffer_server,
2239 	},
2240 	{
2241 		.name = "SOCK_STREAM poll() + SO_RCVLOWAT",
2242 		.run_client = test_stream_poll_rcvlowat_client,
2243 		.run_server = test_stream_poll_rcvlowat_server,
2244 	},
2245 	{
2246 		.name = "SOCK_SEQPACKET big message",
2247 		.run_client = test_seqpacket_bigmsg_client,
2248 		.run_server = test_seqpacket_bigmsg_server,
2249 	},
2250 	{
2251 		.name = "SOCK_STREAM test invalid buffer",
2252 		.run_client = test_stream_inv_buf_client,
2253 		.run_server = test_stream_inv_buf_server,
2254 	},
2255 	{
2256 		.name = "SOCK_SEQPACKET test invalid buffer",
2257 		.run_client = test_seqpacket_inv_buf_client,
2258 		.run_server = test_seqpacket_inv_buf_server,
2259 	},
2260 	{
2261 		.name = "SOCK_STREAM virtio skb merge",
2262 		.run_client = test_stream_virtio_skb_merge_client,
2263 		.run_server = test_stream_virtio_skb_merge_server,
2264 	},
2265 	{
2266 		.name = "SOCK_SEQPACKET MSG_PEEK",
2267 		.run_client = test_seqpacket_msg_peek_client,
2268 		.run_server = test_seqpacket_msg_peek_server,
2269 	},
2270 	{
2271 		.name = "SOCK_STREAM SHUT_WR",
2272 		.run_client = test_stream_shutwr_client,
2273 		.run_server = test_stream_shutwr_server,
2274 	},
2275 	{
2276 		.name = "SOCK_STREAM SHUT_RD",
2277 		.run_client = test_stream_shutrd_client,
2278 		.run_server = test_stream_shutrd_server,
2279 	},
2280 	{
2281 		.name = "SOCK_STREAM MSG_ZEROCOPY",
2282 		.run_client = test_stream_msgzcopy_client,
2283 		.run_server = test_stream_msgzcopy_server,
2284 	},
2285 	{
2286 		.name = "SOCK_SEQPACKET MSG_ZEROCOPY",
2287 		.run_client = test_seqpacket_msgzcopy_client,
2288 		.run_server = test_seqpacket_msgzcopy_server,
2289 	},
2290 	{
2291 		.name = "SOCK_STREAM MSG_ZEROCOPY empty MSG_ERRQUEUE",
2292 		.run_client = test_stream_msgzcopy_empty_errq_client,
2293 		.run_server = test_stream_msgzcopy_empty_errq_server,
2294 	},
2295 	{
2296 		.name = "SOCK_STREAM double bind connect",
2297 		.run_client = test_double_bind_connect_client,
2298 		.run_server = test_double_bind_connect_server,
2299 	},
2300 	{
2301 		.name = "SOCK_STREAM virtio credit update + SO_RCVLOWAT",
2302 		.run_client = test_stream_rcvlowat_def_cred_upd_client,
2303 		.run_server = test_stream_cred_upd_on_set_rcvlowat,
2304 	},
2305 	{
2306 		.name = "SOCK_STREAM virtio credit update + low rx_bytes",
2307 		.run_client = test_stream_rcvlowat_def_cred_upd_client,
2308 		.run_server = test_stream_cred_upd_on_low_rx_bytes,
2309 	},
2310 	{
2311 		.name = "SOCK_STREAM ioctl(SIOCOUTQ) 0 unsent bytes",
2312 		.run_client = test_stream_unsent_bytes_client,
2313 		.run_server = test_stream_unsent_bytes_server,
2314 	},
2315 	{
2316 		.name = "SOCK_SEQPACKET ioctl(SIOCOUTQ) 0 unsent bytes",
2317 		.run_client = test_seqpacket_unsent_bytes_client,
2318 		.run_server = test_seqpacket_unsent_bytes_server,
2319 	},
2320 	{
2321 		.name = "SOCK_STREAM leak accept queue",
2322 		.run_client = test_stream_leak_acceptq_client,
2323 		.run_server = test_stream_leak_acceptq_server,
2324 	},
2325 	{
2326 		.name = "SOCK_STREAM MSG_ZEROCOPY leak MSG_ERRQUEUE",
2327 		.run_client = test_stream_msgzcopy_leak_errq_client,
2328 		.run_server = test_stream_msgzcopy_leak_errq_server,
2329 	},
2330 	{
2331 		.name = "SOCK_STREAM MSG_ZEROCOPY leak completion skb",
2332 		.run_client = test_stream_msgzcopy_leak_zcskb_client,
2333 		.run_server = test_stream_msgzcopy_leak_zcskb_server,
2334 	},
2335 	{
2336 		.name = "SOCK_STREAM transport release use-after-free",
2337 		.run_client = test_stream_transport_uaf_client,
2338 	},
2339 	{
2340 		.name = "SOCK_STREAM retry failed connect()",
2341 		.run_client = test_stream_connect_retry_client,
2342 		.run_server = test_stream_connect_retry_server,
2343 	},
2344 	{
2345 		.name = "SOCK_STREAM SO_LINGER null-ptr-deref",
2346 		.run_client = test_stream_linger_client,
2347 		.run_server = test_stream_linger_server,
2348 	},
2349 	{
2350 		.name = "SOCK_STREAM SO_LINGER close() on unread",
2351 		.run_client = test_stream_nolinger_client,
2352 		.run_server = test_stream_nolinger_server,
2353 	},
2354 	{
2355 		.name = "SOCK_STREAM transport change null-ptr-deref",
2356 		.run_client = test_stream_transport_change_client,
2357 		.run_server = test_stream_transport_change_server,
2358 	},
2359 	{
2360 		.name = "SOCK_STREAM ioctl(SIOCINQ) functionality",
2361 		.run_client = test_stream_unread_bytes_client,
2362 		.run_server = test_stream_unread_bytes_server,
2363 	},
2364 	{
2365 		.name = "SOCK_SEQPACKET ioctl(SIOCINQ) functionality",
2366 		.run_client = test_seqpacket_unread_bytes_client,
2367 		.run_server = test_seqpacket_unread_bytes_server,
2368 	},
2369 	{},
2370 };
2371 
2372 static const char optstring[] = "";
2373 static const struct option longopts[] = {
2374 	{
2375 		.name = "control-host",
2376 		.has_arg = required_argument,
2377 		.val = 'H',
2378 	},
2379 	{
2380 		.name = "control-port",
2381 		.has_arg = required_argument,
2382 		.val = 'P',
2383 	},
2384 	{
2385 		.name = "mode",
2386 		.has_arg = required_argument,
2387 		.val = 'm',
2388 	},
2389 	{
2390 		.name = "peer-cid",
2391 		.has_arg = required_argument,
2392 		.val = 'p',
2393 	},
2394 	{
2395 		.name = "peer-port",
2396 		.has_arg = required_argument,
2397 		.val = 'q',
2398 	},
2399 	{
2400 		.name = "list",
2401 		.has_arg = no_argument,
2402 		.val = 'l',
2403 	},
2404 	{
2405 		.name = "skip",
2406 		.has_arg = required_argument,
2407 		.val = 's',
2408 	},
2409 	{
2410 		.name = "pick",
2411 		.has_arg = required_argument,
2412 		.val = 't',
2413 	},
2414 	{
2415 		.name = "help",
2416 		.has_arg = no_argument,
2417 		.val = '?',
2418 	},
2419 	{},
2420 };
2421 
2422 static void usage(void)
2423 {
2424 	fprintf(stderr, "Usage: vsock_test [--help] [--control-host=<host>] --control-port=<port> --mode=client|server --peer-cid=<cid> [--peer-port=<port>] [--list] [--skip=<test_id>] [--pick=<test_id>]\n"
2425 		"\n"
2426 		"  Server: vsock_test --control-port=1234 --mode=server --peer-cid=3\n"
2427 		"  Client: vsock_test --control-host=192.168.0.1 --control-port=1234 --mode=client --peer-cid=2\n"
2428 		"\n"
2429 		"Run vsock.ko tests.  Must be launched in both guest\n"
2430 		"and host.  One side must use --mode=client and\n"
2431 		"the other side must use --mode=server.\n"
2432 		"\n"
2433 		"A TCP control socket connection is used to coordinate tests\n"
2434 		"between the client and the server.  The server requires a\n"
2435 		"listen address and the client requires an address to\n"
2436 		"connect to.\n"
2437 		"\n"
2438 		"The CID of the other side must be given with --peer-cid=<cid>.\n"
2439 		"During the test, two AF_VSOCK ports will be used: the port\n"
2440 		"specified with --peer-port=<port> (or the default port)\n"
2441 		"and the next one.\n"
2442 		"\n"
2443 		"Options:\n"
2444 		"  --help                 This help message\n"
2445 		"  --control-host <host>  Server IP address to connect to\n"
2446 		"  --control-port <port>  Server port to listen on/connect to\n"
2447 		"  --mode client|server   Server or client mode\n"
2448 		"  --peer-cid <cid>       CID of the other side\n"
2449 		"  --peer-port <port>     AF_VSOCK port used for the test [default: %d]\n"
2450 		"  --list                 List the tests that will be executed\n"
2451 		"  --pick <test_id>       Test ID to execute selectively;\n"
2452 		"                         use multiple --pick options to select more tests\n"
2453 		"  --skip <test_id>       Test ID to skip;\n"
2454 		"                         use multiple --skip options to skip more tests\n",
2455 		DEFAULT_PEER_PORT
2456 		);
2457 	exit(EXIT_FAILURE);
2458 }
2459 
2460 int main(int argc, char **argv)
2461 {
2462 	const char *control_host = NULL;
2463 	const char *control_port = NULL;
2464 	struct test_opts opts = {
2465 		.mode = TEST_MODE_UNSET,
2466 		.peer_cid = VMADDR_CID_ANY,
2467 		.peer_port = DEFAULT_PEER_PORT,
2468 	};
2469 
2470 	srand(time(NULL));
2471 	init_signals();
2472 
2473 	for (;;) {
2474 		int opt = getopt_long(argc, argv, optstring, longopts, NULL);
2475 
2476 		if (opt == -1)
2477 			break;
2478 
2479 		switch (opt) {
2480 		case 'H':
2481 			control_host = optarg;
2482 			break;
2483 		case 'm':
2484 			if (strcmp(optarg, "client") == 0)
2485 				opts.mode = TEST_MODE_CLIENT;
2486 			else if (strcmp(optarg, "server") == 0)
2487 				opts.mode = TEST_MODE_SERVER;
2488 			else {
2489 				fprintf(stderr, "--mode must be \"client\" or \"server\"\n");
2490 				return EXIT_FAILURE;
2491 			}
2492 			break;
2493 		case 'p':
2494 			opts.peer_cid = parse_cid(optarg);
2495 			break;
2496 		case 'q':
2497 			opts.peer_port = parse_port(optarg);
2498 			break;
2499 		case 'P':
2500 			control_port = optarg;
2501 			break;
2502 		case 'l':
2503 			list_tests(test_cases);
2504 			break;
2505 		case 's':
2506 			skip_test(test_cases, ARRAY_SIZE(test_cases) - 1,
2507 				  optarg);
2508 			break;
2509 		case 't':
2510 			pick_test(test_cases, ARRAY_SIZE(test_cases) - 1,
2511 				  optarg);
2512 			break;
2513 		case '?':
2514 		default:
2515 			usage();
2516 		}
2517 	}
2518 
2519 	if (!control_port)
2520 		usage();
2521 	if (opts.mode == TEST_MODE_UNSET)
2522 		usage();
2523 	if (opts.peer_cid == VMADDR_CID_ANY)
2524 		usage();
2525 
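	/* Only the server may omit --control-host; it then listens on all
	 * interfaces for the client's control connection.
	 */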
2526 	if (!control_host) {
2527 		if (opts.mode != TEST_MODE_SERVER)
2528 			usage();
2529 		control_host = "0.0.0.0";
2530 	}
2531 
2532 	control_init(control_host, control_port,
2533 		     opts.mode == TEST_MODE_SERVER);
2534 
2535 	run_tests(test_cases, &opts);
2536 
2537 	control_cleanup();
2538 	return EXIT_SUCCESS;
2539 }
2540