1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2019 The FreeBSD Foundation
5 *
6 * This software was developed by BFF Storage Systems, LLC under sponsorship
7 * from the FreeBSD Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 extern "C" {
32 #include <sys/param.h>
33 #include <sys/mman.h>
34 #include <sys/socket.h>
35 #include <sys/sysctl.h>
36 #include <sys/uio.h>
37
38 #include <aio.h>
39 #include <fcntl.h>
40 #include <semaphore.h>
41 #include <setjmp.h>
42 #include <signal.h>
43 #include <unistd.h>
44 }
45
46 #include "mockfs.hh"
47 #include "utils.hh"
48
49 using namespace testing;
50
/* Fixture for FUSE_READ tests using default mount options */
class Read: public FuseTest {

public:
	/* Expect a lookup of a regular 0644 file with the given size */
	void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
	{
		FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1);
	}
};
59
/* Read tests on a read-only mount */
class RofsRead: public Read {
public:
	virtual void SetUp() {
		/* Mount the filesystem read-only */
		m_ro = true;
		Read::SetUp();
	}
};
67
/* Read tests using FUSE protocol version 7.8 */
class Read_7_8: public FuseTest {
public:
	virtual void SetUp() {
		/* Negotiate the old 7.8 protocol with the kernel */
		m_kernel_minor_version = 8;
		FuseTest::SetUp();
	}

	/* Expect a protocol-7.8 lookup of a regular 0644 file */
	void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
	{
		FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1);
	}
};
80
/* Read tests that use POSIX AIO (aio_read(2)) */
class AioRead: public Read {
public:
	virtual void SetUp() {
		/* AIO on fusefs is gated behind vfs.aio.enable_unsafe */
		if (!is_unsafe_aio_enabled())
			GTEST_SKIP() <<
				"vfs.aio.enable_unsafe must be set for this test";
		FuseTest::SetUp();
	}
};
90
/* AIO read tests with FUSE_ASYNC_READ negotiated at init time */
class AsyncRead: public AioRead {
	virtual void SetUp() {
		m_init_flags = FUSE_ASYNC_READ;
		AioRead::SetUp();
	}
};
97
/* Async reads with the attribute cache disabled (attr_valid == 0) */
class AsyncReadNoAttrCache: public Read {
	virtual void SetUp() {
		m_init_flags = FUSE_ASYNC_READ;
		Read::SetUp();
	}
public:
	/* Expect a lookup whose reply carries no size and no attr caching */
	void expect_lookup(const char *relpath, uint64_t ino)
	{
		// Don't return size, and set attr_valid=0
		FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, 0, 1, 0);
	}
};
110
/*
 * Parameterized on <noclusterr, readahead multiplier>.  m_maxreadahead is
 * set to the multiplier times the vfs.maxbcachebuf sysctl value.
 */
class ReadAhead: public Read,
		 public WithParamInterface<tuple<bool, int>>
{
	virtual void SetUp() {
		int val;
		const char *node = "vfs.maxbcachebuf";
		size_t size = sizeof(val);
		ASSERT_EQ(0, sysctlbyname(node, &val, &size, NULL, 0))
			<< strerror(errno);

		m_maxreadahead = val * get<1>(GetParam());
		m_noclusterr = get<0>(GetParam());
		Read::SetUp();
	}
};
126
/* Read tests with the max_read mount option limiting read size */
class ReadMaxRead: public Read {
	virtual void SetUp() {
		/* Cap each FUSE_READ at 16 KiB */
		m_maxread = 16384;
		Read::SetUp();
	}
};
133
/* Read tests with the -o noatime mount option */
class ReadNoatime: public Read {
	virtual void SetUp() {
		m_noatime = true;
		Read::SetUp();
	}
};
140
/* Read tests that provoke and catch SIGBUS (e.g. on truncated mmaps) */
class ReadSigbus: public Read
{
public:
	/* Jump target for handle_sigbus to escape back to the test */
	static jmp_buf s_jmpbuf;
	/* Faulting address recorded from the SIGBUS siginfo */
	static void *s_si_addr;

	void TearDown() {
		struct sigaction sa;

		/* Restore the default SIGBUS disposition */
		bzero(&sa, sizeof(sa));
		sa.sa_handler = SIG_DFL;
		sigaction(SIGBUS, &sa, NULL);

		FuseTest::TearDown();
	}

};
158
/* SIGBUS handler: record the faulting address, then escape via longjmp */
static void
handle_sigbus(int signo __unused, siginfo_t *info, void *uap __unused) {
	ReadSigbus::s_si_addr = info->si_addr;
	longjmp(ReadSigbus::s_jmpbuf, 1);
}

/* Storage for ReadSigbus's static members */
jmp_buf ReadSigbus::s_jmpbuf;
void *ReadSigbus::s_si_addr;
167
/* Parameterized read tests: m_time_gran = 1 << GetParam() */
class TimeGran: public Read, public WithParamInterface<unsigned> {
public:
	virtual void SetUp() {
		m_time_gran = 1 << GetParam();
		Read::SetUp();
	}
};
175
176 /* AIO reads need to set the header's pid field correctly */
177 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */
TEST_F(AioRead, aio_read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];
	struct aiocb iocb, *piocb;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Read the whole file via POSIX AIO instead of read(2) */
	iocb.aio_nbytes = bufsize;
	iocb.aio_fildes = fd;
	iocb.aio_buf = buf;
	iocb.aio_offset = 0;
	iocb.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb)) << strerror(errno);
	/* aio_waitcomplete(2) returns the byte count of the completed op */
	ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	leak(fd);
}
207
208 /*
209 * Without the FUSE_ASYNC_READ mount option, fuse(4) should ensure that there
210 * is at most one outstanding read operation per file handle
211 */
TEST_F(AioRead, async_read_disabled)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 50;
	char buf0[bufsize], buf1[bufsize];
	off_t off0 = 0;
	/* The two reads target different buffer-cache blocks */
	off_t off1 = m_maxbcachebuf;
	struct aiocb iocb0, iocb1;
	volatile sig_atomic_t read_count = 0;

	expect_lookup(RELPATH, ino, 131072);
	expect_open(ino, 0, 1);
	/* Count FUSE_READs at off0, but never send a reply */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off0);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) {
		read_count++;
		/* Filesystem is slow to respond */
	}));
	/* Likewise count FUSE_READs at off1 without replying */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off1);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) {
		read_count++;
		/* Filesystem is slow to respond */
	}));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/*
	 * Submit two AIO read requests, and respond to neither.  If the
	 * filesystem ever gets the second read request, then we failed to
	 * limit outstanding reads.
	 */
	iocb0.aio_nbytes = bufsize;
	iocb0.aio_fildes = fd;
	iocb0.aio_buf = buf0;
	iocb0.aio_offset = off0;
	iocb0.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno);

	iocb1.aio_nbytes = bufsize;
	iocb1.aio_fildes = fd;
	iocb1.aio_buf = buf1;
	iocb1.aio_offset = off1;
	iocb1.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno);

	/*
	 * Sleep for a while to make sure the kernel has had a chance to issue
	 * the second read, even though the first has not yet returned
	 */
	nap();
	/* Exactly one read must have reached the daemon */
	EXPECT_EQ(read_count, 1);

	m_mock->kill_daemon();
	/* Wait for AIO activity to complete, but ignore errors */
	(void)aio_waitcomplete(NULL, NULL);

	leak(fd);
}
287
288 /*
289 * With the FUSE_ASYNC_READ mount option, fuse(4) may issue multiple
290 * simultaneous read requests on the same file handle.
291 */
TEST_F(AsyncRead, async_read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 50;
	char buf0[bufsize], buf1[bufsize];
	off_t off0 = 0;
	/* The two reads target different buffer-cache blocks */
	off_t off1 = m_maxbcachebuf;
	off_t fsize = 2 * m_maxbcachebuf;
	struct aiocb iocb0, iocb1;
	sem_t sem;

	ASSERT_EQ(0, sem_init(&sem, 0, 0)) << strerror(errno);

	expect_lookup(RELPATH, ino, fsize);
	expect_open(ino, 0, 1);
	/* Post the semaphore when each read arrives; never reply */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off0);
		}, Eq(true)),
		_)
	).WillOnce(Invoke([&](auto in __unused, auto &out __unused) {
		sem_post(&sem);
		/* Filesystem is slow to respond */
	}));
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off1);
		}, Eq(true)),
		_)
	).WillOnce(Invoke([&](auto in __unused, auto &out __unused) {
		sem_post(&sem);
		/* Filesystem is slow to respond */
	}));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/*
	 * Submit two AIO read requests, but respond to neither.  Ensure that
	 * we received both.
	 */
	iocb0.aio_nbytes = bufsize;
	iocb0.aio_fildes = fd;
	iocb0.aio_buf = buf0;
	iocb0.aio_offset = off0;
	iocb0.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno);

	iocb1.aio_nbytes = bufsize;
	iocb1.aio_fildes = fd;
	iocb1.aio_buf = buf1;
	iocb1.aio_offset = off1;
	iocb1.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno);

	/* Wait until both reads have reached the daemon */
	ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno);
	ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno);

	m_mock->kill_daemon();
	/* Wait for AIO activity to complete, but ignore errors */
	(void)aio_waitcomplete(NULL, NULL);

	leak(fd);
}
366
367 /*
368 * Regression test for a VFS locking bug: as of
369 * 22bb70a6b3bb7799276ab480e40665b7d6e4ce25 (17-December-2024), fusefs did not
370 * obtain an exclusive vnode lock before attempting to clear the attr cache
371 * after an unexpected eof. The vnode lock would already be exclusive except
372 * when FUSE_ASYNC_READ is set.
373 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=283391
374 */
TEST_F(AsyncRead, eof)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	uint64_t offset = 100;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t partbufsize = 3 * bufsize / 4;
	ssize_t r;
	uint8_t buf[bufsize];
	struct stat sb;

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, 0, 1);
	/* Short read reply: signals a server-side truncation (EOF) */
	expect_read(ino, 0, offset + bufsize, offset + partbufsize, CONTENTS);
	/* The kernel refetches attributes; report the new smaller size */
	expect_getattr(ino, offset + partbufsize);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	r = pread(fd, buf, bufsize, offset);
	ASSERT_LE(0, r) << strerror(errno);
	EXPECT_EQ(partbufsize, r) << strerror(errno);
	/* fstat must observe the server's reduced file size */
	ASSERT_EQ(0, fstat(fd, &sb));
	EXPECT_EQ((off_t)(offset + partbufsize), sb.st_size);
	leak(fd);
}
404
405 /*
406 * If the daemon disables the attribute cache (or if it has expired), then the
407 * kernel must fetch attributes during VOP_READ. If async reads are enabled,
408 * then fuse_internal_cache_attrs will be called without the vnode exclusively
409 * locked. Regression test for
410 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=291064
411 */
TEST_F(AsyncReadNoAttrCache, read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	mode_t mode = S_IFREG | 0644;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino);
	expect_open(ino, 0, 1);
	/*
	 * Because the lookup carried no cached attributes, the kernel must
	 * issue FUSE_GETATTR; reply with attr_valid=0 so nothing is cached.
	 */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out)
	{
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = mode;
		out.body.attr.attr.size = bufsize;
		out.body.attr.attr_valid = 0;
	})));
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	leak(fd);
}
449
450 /*
451 * If the vnode's attr cache has expired before VOP_READ begins, the kernel
452 * will have to fetch the file's size from the server. If it has changed since
453 * our last query, we'll need to update the vnode pager. But we'll only have a
454 * shared vnode lock.
455 */
TEST_F(AsyncReadNoAttrCache, read_sizechange)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	mode_t mode = S_IFREG | 0644;
	int fd;
	size_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];
	size_t size1 = bufsize - 1;	/* size before the server-side change */
	size_t size2 = bufsize;		/* size after the server-side change */
	Sequence seq;

	expect_lookup(RELPATH, ino);
	expect_open(ino, 0, 1);
	/* First two GETATTRs report size1, never cacheable (attr_valid=0) */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).Times(2)
	.InSequence(seq)
	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out)
	{
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = mode;
		out.body.attr.attr.size = size1;
		out.body.attr.attr_valid = 0;
	})));
	/* The first read is clipped to size1 */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.offset == 0 &&
				in.body.read.size == size1);
		}, Eq(true)),
		_)
	).Times(1)
	.InSequence(seq)
	.WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(struct fuse_out_header) + size1;
		memmove(out.body.bytes, CONTENTS, size1);
	})));
	/* Before the second read, GETATTR now reports the grown size2 */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).InSequence(seq)
	.WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out)
	{
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = mode;
		out.body.attr.attr.size = size2;
		out.body.attr.attr_valid = 0;
	})));
	/* The second read is clipped to the new size2 */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.offset == 0 &&
				in.body.read.size == size2);
		}, Eq(true)),
		_)
	).Times(1)
	.InSequence(seq)
	.WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(struct fuse_out_header) + size2;
		memmove(out.body.bytes, CONTENTS, size2);
	})));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(static_cast<ssize_t>(size1), read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, size1));

	/* Read again, but this time the server has changed the file's size */
	bzero(buf, size2);
	ASSERT_EQ(static_cast<ssize_t>(size2), pread(fd, buf, bufsize, 0)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, size2));

	leak(fd);
}
545
546 /* The kernel should update the cached atime attribute during a read */
TEST_F(Read, atime)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat before, after;
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &before));

	/* Wait long enough that a post-read atime is distinguishable */
	nap();

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &after));

	/* A read must bump atime but leave ctime and mtime untouched */
	EXPECT_TRUE(timespeccmp(&before.st_atim, &after.st_atim, <));
	EXPECT_TRUE(timespeccmp(&before.st_ctim, &after.st_ctim, ==));
	EXPECT_TRUE(timespeccmp(&before.st_mtim, &after.st_mtim, ==));

	leak(fd);
}
579
580 /* The kernel should update the cached atime attribute during a cached read */
TEST_F(Read, atime_cached)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat before, after;
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	/* Only a single FUSE_READ: the repeat read is served from cache */
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &before));

	/* Wait so the second read happens at a measurably later time */
	nap();

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &after));

	/* Even a cache hit must bump atime; ctime and mtime stay put */
	EXPECT_TRUE(timespeccmp(&before.st_atim, &after.st_atim, <));
	EXPECT_TRUE(timespeccmp(&before.st_ctim, &after.st_ctim, ==));
	EXPECT_TRUE(timespeccmp(&before.st_mtim, &after.st_mtim, ==));

	leak(fd);
}
615
616 /* dirty atime values should be flushed during close */
TEST_F(Read, atime_during_close)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb;
	uint64_t ino = 42;
	const mode_t newmode = 0755;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	/*
	 * close(2) must send a FUSE_SETATTR carrying exactly the cached
	 * atime that fstat(2) observed below (sb is captured by reference).
	 */
	EXPECT_CALL(*m_mock, process(
		ResultOf([&](auto in) {
			uint32_t valid = FATTR_ATIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid &&
				(time_t)in.body.setattr.atime ==
					sb.st_atim.tv_sec &&
				(long)in.body.setattr.atimensec ==
					sb.st_atim.tv_nsec);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | newmode;
	})));
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, FuseTest::FH);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Ensure atime will be different than during lookup */
	nap();

	/* Dirty the cached atime, then capture it for the matcher above */
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb));

	/*
	 * The close must both flush the dirty atime and succeed, matching the
	 * assertion style of Read.atime_during_close_eacces.
	 */
	ASSERT_EQ(0, close(fd)) << strerror(errno);
}
663
664 /*
665 * When not using -o default_permissions, the daemon may make its own decisions
666 * regarding access permissions, and these may be unpredictable. If it rejects
667 * our attempt to set atime, that should not cause close(2) to fail.
668 */
TEST_F(Read, atime_during_close_eacces)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	/* The daemon rejects the atime writeback with EACCES */
	EXPECT_CALL(*m_mock, process(
		ResultOf([&](auto in) {
			uint32_t valid = FATTR_ATIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(EACCES)));
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, FuseTest::FH);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Ensure atime will be different than during lookup */
	nap();

	/* Dirty the cached atime */
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);

	/* Despite the EACCES on the SETATTR, close(2) must still succeed */
	ASSERT_EQ(0, close(fd));
}
704
705 /* A cached atime should be flushed during FUSE_SETATTR */
TEST_F(Read, atime_during_setattr)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb;
	uint64_t ino = 42;
	const mode_t newmode = 0755;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	/*
	 * The fchmod-triggered SETATTR must piggyback the dirty atime:
	 * both FATTR_MODE and FATTR_ATIME set, with the atime that fstat
	 * observed below (sb is captured by reference).
	 */
	EXPECT_CALL(*m_mock, process(
		ResultOf([&](auto in) {
			uint32_t valid = FATTR_MODE | FATTR_ATIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid &&
				(time_t)in.body.setattr.atime ==
					sb.st_atim.tv_sec &&
				(long)in.body.setattr.atimensec ==
					sb.st_atim.tv_nsec);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | newmode;
	})));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Ensure atime will be different than during lookup */
	nap();

	/* Dirty the cached atime, then capture it for the matcher above */
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb));
	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);

	leak(fd);
}
751
752 /* 0-length reads shouldn't cause any confusion */
TEST_F(Read, direct_io_read_nothing)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const uint64_t ino = 42;
	const uint64_t off = 100;
	char scratch[80];
	int fd;

	/* The file is far larger than the (empty) region we'll read */
	expect_lookup(RELPATH, ino, off + 1000);
	expect_open(ino, FOPEN_DIRECT_IO, 1);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* A zero-length pread must simply return 0 */
	ASSERT_EQ(0, pread(fd, scratch, 0, off)) << strerror(errno);
	leak(fd);
}
771
772 /*
773 * With direct_io, reads should not fill the cache. They should go straight to
774 * the daemon
775 */
TEST_F(Read, direct_io_pread)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	uint64_t offset = 100;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	expect_read(ino, offset, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	// With FOPEN_DIRECT_IO, the cache should be bypassed.  The server will
	// get a 2nd read request.
	expect_read(ino, offset, bufsize, bufsize, CONTENTS);
	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
	leak(fd);
}
804
805 /*
806 * With direct_io, filesystems are allowed to return less data than is
807 * requested. fuse(4) should return a short read to userland.
808 */
TEST_F(Read, direct_io_short_read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	const uint64_t ino = 42;
	const uint64_t offset = 100;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t halfbufsize = bufsize / 2;
	uint8_t databuf[bufsize];
	int fd;

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	/* The daemon deliberately returns only half of the requested bytes */
	expect_read(ino, offset, bufsize, halfbufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* The short count must propagate unchanged to the caller */
	ASSERT_EQ(halfbufsize, pread(fd, databuf, bufsize, offset))
		<< strerror(errno);
	ASSERT_EQ(0, memcmp(databuf, CONTENTS, halfbufsize));
	leak(fd);
}
833
/* A FUSE_READ error from the daemon must surface as errno from read(2) */
TEST_F(Read, eio)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	/* Reply to any FUSE_READ with EIO */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(EIO)));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(-1, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(EIO, errno);
	leak(fd);
}
860
/*
 * If the server returns a short read when direct io is not in use, that
 * indicates EOF, because of a server-side truncation.  We should invalidate
 * all cached attributes, and we may update the file size.
 */
TEST_F(Read, eof)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	uint64_t offset = 100;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t partbufsize = 3 * bufsize / 4;
	ssize_t r;
	uint8_t buf[bufsize];
	struct stat sb;

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, 0, 1);
	/* Short read reply: signals a server-side truncation (EOF) */
	expect_read(ino, 0, offset + bufsize, offset + partbufsize, CONTENTS);
	/* The kernel refetches attributes; report the new smaller size */
	expect_getattr(ino, offset + partbufsize);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	r = pread(fd, buf, bufsize, offset);
	ASSERT_LE(0, r) << strerror(errno);
	EXPECT_EQ(partbufsize, r) << strerror(errno);
	/* fstat must observe the server's reduced file size */
	ASSERT_EQ(0, fstat(fd, &sb));
	EXPECT_EQ((off_t)(offset + partbufsize), sb.st_size);
	leak(fd);
}
895
896 /* Like Read.eof, but causes an entire buffer to be invalidated */
TEST_F(Read, eof_of_whole_buffer)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	/* File originally spans three buffer-cache blocks */
	off_t old_filesize = m_maxbcachebuf * 2 + bufsize;
	uint8_t buf[bufsize];
	struct stat sb;

	expect_lookup(RELPATH, ino, old_filesize);
	expect_open(ino, 0, 1);
	expect_read(ino, 2 * m_maxbcachebuf, bufsize, bufsize, CONTENTS);
	/* The read of the 2nd block returns 0 bytes: whole-buffer EOF */
	expect_read(ino, m_maxbcachebuf, m_maxbcachebuf, 0, CONTENTS);
	/* The refetched attributes report the truncated size */
	expect_getattr(ino, m_maxbcachebuf);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Cache the third block */
	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, m_maxbcachebuf * 2))
		<< strerror(errno);
	/* Try to read the 2nd block, but it's past EOF */
	ASSERT_EQ(0, pread(fd, buf, bufsize, m_maxbcachebuf))
		<< strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb));
	EXPECT_EQ((off_t)(m_maxbcachebuf), sb.st_size);
	leak(fd);
}
928
929 /*
930 * With the keep_cache option, the kernel may keep its read cache across
931 * multiple open(2)s.
932 */
TEST_F(Read, keep_cache)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd0, fd1;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	/* Two lookups and two opens; the daemon sets FOPEN_KEEP_CACHE */
	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2);
	expect_open(ino, FOPEN_KEEP_CACHE, 2);
	/* Only one FUSE_READ is expected for both descriptors */
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd0 = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd0) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);

	fd1 = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd1) << strerror(errno);

	/*
	 * This read should be serviced by cache, even though it's on the other
	 * file descriptor
	 */
	ASSERT_EQ(bufsize, read(fd1, buf, bufsize)) << strerror(errno);

	leak(fd0);
	leak(fd1);
}
963
964 /*
965 * Without the keep_cache option, the kernel should drop its read caches on
966 * every open
967 */
TEST_F(Read, keep_cache_disabled)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int first_fd, second_fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t data[bufsize];

	/* Two opens; the daemon does NOT set FOPEN_KEEP_CACHE */
	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2);
	expect_open(ino, 0, 2);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	first_fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, first_fd) << strerror(errno);
	ASSERT_EQ(bufsize, read(first_fd, data, bufsize)) << strerror(errno);

	/* The second open should discard the cache filled above */
	second_fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, second_fd) << strerror(errno);

	/*
	 * Rereading the same range, even on the original descriptor, must go
	 * back to the daemon instead of being satisfied from cache.
	 */
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	ASSERT_EQ(0, lseek(first_fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(first_fd, data, bufsize)) << strerror(errno);

	leak(first_fd);
	leak(second_fd);
}
1000
/* mmap'd page-ins should be satisfied by FUSE_READ */
TEST_F(Read, mmap)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t len;
	size_t bufsize = strlen(CONTENTS);
	void *p;

	len = getpagesize();

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	/* Expect the paging read of the whole (short) file at offset 0 */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == Read::FH &&
				in.body.read.offset == 0 &&
				in.body.read.size == bufsize);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(struct fuse_out_header) + bufsize;
		memmove(out.body.bytes, CONTENTS, bufsize);
	})));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	/* Touching the mapping triggers the FUSE_READ expected above */
	ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize));

	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	leak(fd);
}
1041
1042
1043 /* When max_read is set, large reads will be split up as necessary */
TEST_F(ReadMaxRead,split)1044 TEST_F(ReadMaxRead, split)
1045 {
1046 const char FULLPATH[] = "mountpoint/some_file.txt";
1047 const char RELPATH[] = "some_file.txt";
1048 uint64_t ino = 42;
1049 int fd;
1050 ssize_t bufsize = 65536;
1051 ssize_t fragsize = bufsize / 4;
1052 char *rbuf, *frag0, *frag1, *frag2, *frag3;
1053
1054 rbuf = new char[bufsize]();
1055 frag0 = new char[fragsize]();
1056 frag1 = new char[fragsize]();
1057 frag2 = new char[fragsize]();
1058 frag3 = new char[fragsize]();
1059 memset(frag0, '0', fragsize);
1060 memset(frag1, '1', fragsize);
1061 memset(frag2, '2', fragsize);
1062 memset(frag3, '3', fragsize);
1063
1064 expect_lookup(RELPATH, ino, bufsize);
1065 expect_open(ino, 0, 1);
1066 expect_read(ino, 0, fragsize, fragsize, frag0);
1067 expect_read(ino, fragsize, fragsize, fragsize, frag1);
1068 expect_read(ino, 2 * fragsize, fragsize, fragsize, frag2);
1069 expect_read(ino, 3 * fragsize, fragsize, fragsize, frag3);
1070
1071 fd = open(FULLPATH, O_RDONLY);
1072 ASSERT_LE(0, fd) << strerror(errno);
1073
1074 ASSERT_EQ(bufsize, read(fd, rbuf, bufsize)) << strerror(errno);
1075 ASSERT_EQ(0, memcmp(rbuf, frag0, fragsize));
1076 ASSERT_EQ(0, memcmp(rbuf + fragsize, frag1, fragsize));
1077 ASSERT_EQ(0, memcmp(rbuf + 2 * fragsize, frag2, fragsize));
1078 ASSERT_EQ(0, memcmp(rbuf + 3 * fragsize, frag3, fragsize));
1079
1080 delete[] frag3;
1081 delete[] frag2;
1082 delete[] frag1;
1083 delete[] frag0;
1084 delete[] rbuf;
1085 leak(fd);
1086 }
1087
1088 /*
1089 * The kernel should not update the cached atime attribute during a read, if
1090 * MNT_NOATIME is used.
1091 */
TEST_F(ReadNoatime,atime)1092 TEST_F(ReadNoatime, atime)
1093 {
1094 const char FULLPATH[] = "mountpoint/some_file.txt";
1095 const char RELPATH[] = "some_file.txt";
1096 const char *CONTENTS = "abcdefgh";
1097 struct stat sb1, sb2;
1098 uint64_t ino = 42;
1099 int fd;
1100 ssize_t bufsize = strlen(CONTENTS);
1101 uint8_t buf[bufsize];
1102
1103 expect_lookup(RELPATH, ino, bufsize);
1104 expect_open(ino, 0, 1);
1105 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
1106
1107 fd = open(FULLPATH, O_RDONLY);
1108 ASSERT_LE(0, fd) << strerror(errno);
1109 ASSERT_EQ(0, fstat(fd, &sb1));
1110
1111 nap();
1112
1113 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
1114 ASSERT_EQ(0, fstat(fd, &sb2));
1115
1116 /* The kernel should not update atime during read */
1117 EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, ==));
1118 EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==));
1119 EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==));
1120
1121 leak(fd);
1122 }
1123
/*
 * The kernel should not update the cached atime attribute during a cached
 * read, if MNT_NOATIME is used.
 */
TEST_F(ReadNoatime, atime_cached)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb1, sb2;
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	/* Only one FUSE_READ is expected; the second pread should hit cache */
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* The first read populates the page cache */
	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb1));

	/* Sleep long enough for a timestamp change to be observable */
	nap();

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb2));

	/*
	 * On a noatime mount, the kernel should NOT update atime during the
	 * cached read.  (A previous revision of this comment claimed the
	 * opposite, contradicting the assertions below.)
	 */
	EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, ==));
	EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==));
	EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==));

	leak(fd);
}
1161
/* Read of an mmap()ed file fails */
TEST_F(ReadSigbus, mmap_eio)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct sigaction sa;
	uint64_t ino = 42;
	int fd;
	ssize_t len;
	size_t bufsize = strlen(CONTENTS);
	void *p;

	len = getpagesize();

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	/* Every FUSE_READ for this file fails with EIO */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == Read::FH);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(EIO)));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	/* Accessing the mapped page should return SIGBUS. */

	/*
	 * Install a one-shot (SA_RESETHAND) SIGBUS handler.  handle_sigbus
	 * is presumably expected to record the fault address in s_si_addr
	 * and longjmp back to the setjmp below — confirm against its
	 * definition elsewhere in this file.
	 */
	bzero(&sa, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sa.sa_sigaction = handle_sigbus;
	sa.sa_flags = SA_RESETHAND | SA_SIGINFO;
	ASSERT_EQ(0, sigaction(SIGBUS, &sa, NULL)) << strerror(errno);
	if (setjmp(ReadSigbus::s_jmpbuf) == 0) {
		/* Keep the compiler from reordering across the setjmp */
		atomic_signal_fence(std::memory_order::memory_order_seq_cst);
		volatile char x __unused = *(volatile char*)p;
		FAIL() << "shouldn't get here";
	}

	/* The fault must have occurred at the mapping's base address */
	ASSERT_EQ(p, ReadSigbus::s_si_addr);
	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	leak(fd);
}
1211
1212 /*
1213 * A read via mmap comes up short, indicating that the file was truncated
1214 * server-side.
1215 */
TEST_F(Read,mmap_eof)1216 TEST_F(Read, mmap_eof)
1217 {
1218 const char FULLPATH[] = "mountpoint/some_file.txt";
1219 const char RELPATH[] = "some_file.txt";
1220 const char *CONTENTS = "abcdefgh";
1221 uint64_t ino = 42;
1222 int fd;
1223 ssize_t len;
1224 size_t bufsize = strlen(CONTENTS);
1225 struct stat sb;
1226 void *p;
1227
1228 len = getpagesize();
1229
1230 expect_lookup(RELPATH, ino, m_maxbcachebuf);
1231 expect_open(ino, 0, 1);
1232 EXPECT_CALL(*m_mock, process(
1233 ResultOf([=](auto in) {
1234 return (in.header.opcode == FUSE_READ &&
1235 in.header.nodeid == ino &&
1236 in.body.read.fh == Read::FH &&
1237 in.body.read.offset == 0 &&
1238 in.body.read.size == (uint32_t)m_maxbcachebuf);
1239 }, Eq(true)),
1240 _)
1241 ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
1242 out.header.len = sizeof(struct fuse_out_header) + bufsize;
1243 memmove(out.body.bytes, CONTENTS, bufsize);
1244 })));
1245 expect_getattr(ino, bufsize);
1246
1247 fd = open(FULLPATH, O_RDONLY);
1248 ASSERT_LE(0, fd) << strerror(errno);
1249
1250 p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
1251 ASSERT_NE(MAP_FAILED, p) << strerror(errno);
1252
1253 /* The file size should be automatically truncated */
1254 ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize));
1255 ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1256 EXPECT_EQ((off_t)bufsize, sb.st_size);
1257
1258 ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
1259 leak(fd);
1260 }
1261
/*
 * During VOP_GETPAGES, the FUSE server fails a FUSE_GETATTR operation. This
 * almost certainly indicates a buggy FUSE server, and our goal should be not
 * to panic. Instead, generate SIGBUS.
 */
TEST_F(ReadSigbus, mmap_getblksz_fail)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct sigaction sa;
	Sequence seq;
	uint64_t ino = 42;
	int fd;
	ssize_t len;
	size_t bufsize = strlen(CONTENTS);
	mode_t mode = S_IFREG | 0644;
	void *p;

	len = getpagesize();

	/* Final argument presumably the attr cache timeout — confirm */
	FuseTest::expect_lookup(RELPATH, ino, mode, bufsize, 1, 0);
	/* Expect two GETATTR calls that succeed, followed by one that fails. */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).Times(2)
	.InSequence(seq)
	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = mode;
		out.body.attr.attr.size = bufsize;
		/* attr_valid == 0: the kernel may not cache these attrs */
		out.body.attr.attr_valid = 0;
	})));
	/* The next GETATTR fails, simulating a buggy server */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).InSequence(seq)
	.WillRepeatedly(Invoke(ReturnErrno(EIO)));
	expect_open(ino, 0, 1);
	/* After the failed GETATTR, no FUSE_READ may be issued at all */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ);
		}, Eq(true)),
		_)
	).Times(0);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	/* Accessing the mapped page should return SIGBUS. */
	bzero(&sa, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sa.sa_sigaction = handle_sigbus;
	sa.sa_flags = SA_RESETHAND | SA_SIGINFO;
	ASSERT_EQ(0, sigaction(SIGBUS, &sa, NULL)) << strerror(errno);
	if (setjmp(ReadSigbus::s_jmpbuf) == 0) {
		atomic_signal_fence(std::memory_order::memory_order_seq_cst);
		volatile char x __unused = *(volatile char*)p;
		FAIL() << "shouldn't get here";
	}

	/* The fault must have occurred at the mapping's base address */
	ASSERT_EQ(p, ReadSigbus::s_si_addr);
	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	leak(fd);
}
1338
1339 /*
1340 * Just as when FOPEN_DIRECT_IO is used, reads with O_DIRECT should bypass
1341 * cache and to straight to the daemon
1342 */
TEST_F(Read,o_direct)1343 TEST_F(Read, o_direct)
1344 {
1345 const char FULLPATH[] = "mountpoint/some_file.txt";
1346 const char RELPATH[] = "some_file.txt";
1347 const char *CONTENTS = "abcdefgh";
1348 uint64_t ino = 42;
1349 int fd;
1350 ssize_t bufsize = strlen(CONTENTS);
1351 uint8_t buf[bufsize];
1352
1353 expect_lookup(RELPATH, ino, bufsize);
1354 expect_open(ino, 0, 1);
1355 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
1356
1357 fd = open(FULLPATH, O_RDONLY);
1358 ASSERT_LE(0, fd) << strerror(errno);
1359
1360 // Fill the cache
1361 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
1362 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
1363
1364 // Reads with o_direct should bypass the cache
1365 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
1366 ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
1367 ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
1368 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
1369 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
1370
1371 leak(fd);
1372 }
1373
TEST_F(Read, pread)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	/*
	 * Read from a maxbcachebuf-aligned offset so we know exactly which
	 * offset the kernel will request.  Otherwise the read might start
	 * at some lower offset.
	 */
	uint64_t offs = m_maxbcachebuf;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];
	int fd;

	expect_lookup(RELPATH, ino, offs + bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, offs, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* pread at the aligned offset should return the daemon's data */
	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offs)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
	leak(fd);
}
1400
TEST_F(Read, read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];
	int fd;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* The daemon's data should be returned verbatim */
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	leak(fd);
}
1423
/* Like Read.read, but using the Read_7_8 fixture */
TEST_F(Read_7_8, read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];
	int fd;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* The daemon's data should be returned verbatim */
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	leak(fd);
}
1446
1447 /*
1448 * If cacheing is enabled, the kernel should try to read an entire cache block
1449 * at a time.
1450 */
TEST_F(Read,cache_block)1451 TEST_F(Read, cache_block)
1452 {
1453 const char FULLPATH[] = "mountpoint/some_file.txt";
1454 const char RELPATH[] = "some_file.txt";
1455 const char *CONTENTS0 = "abcdefghijklmnop";
1456 uint64_t ino = 42;
1457 int fd;
1458 ssize_t bufsize = 8;
1459 ssize_t filesize = m_maxbcachebuf * 2;
1460 char *contents;
1461 char buf[bufsize];
1462 const char *contents1 = CONTENTS0 + bufsize;
1463
1464 contents = new char[filesize]();
1465 memmove(contents, CONTENTS0, strlen(CONTENTS0));
1466
1467 expect_lookup(RELPATH, ino, filesize);
1468 expect_open(ino, 0, 1);
1469 expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf,
1470 contents);
1471
1472 fd = open(FULLPATH, O_RDONLY);
1473 ASSERT_LE(0, fd) << strerror(errno);
1474
1475 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
1476 ASSERT_EQ(0, memcmp(buf, CONTENTS0, bufsize));
1477
1478 /* A subsequent read should be serviced by cache */
1479 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
1480 ASSERT_EQ(0, memcmp(buf, contents1, bufsize));
1481 leak(fd);
1482 delete[] contents;
1483 }
1484
1485 /* Reading with sendfile should work (though it obviously won't be 0-copy) */
TEST_F(Read,sendfile)1486 TEST_F(Read, sendfile)
1487 {
1488 const char FULLPATH[] = "mountpoint/some_file.txt";
1489 const char RELPATH[] = "some_file.txt";
1490 const char *CONTENTS = "abcdefgh";
1491 uint64_t ino = 42;
1492 int fd;
1493 size_t bufsize = strlen(CONTENTS);
1494 uint8_t buf[bufsize];
1495 int sp[2];
1496 off_t sbytes;
1497
1498 expect_lookup(RELPATH, ino, bufsize);
1499 expect_open(ino, 0, 1);
1500 EXPECT_CALL(*m_mock, process(
1501 ResultOf([=](auto in) {
1502 return (in.header.opcode == FUSE_READ &&
1503 in.header.nodeid == ino &&
1504 in.body.read.fh == Read::FH &&
1505 in.body.read.offset == 0 &&
1506 in.body.read.size == bufsize);
1507 }, Eq(true)),
1508 _)
1509 ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
1510 out.header.len = sizeof(struct fuse_out_header) + bufsize;
1511 memmove(out.body.bytes, CONTENTS, bufsize);
1512 })));
1513
1514 ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
1515 << strerror(errno);
1516 fd = open(FULLPATH, O_RDONLY);
1517 ASSERT_LE(0, fd) << strerror(errno);
1518
1519 ASSERT_EQ(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0))
1520 << strerror(errno);
1521 ASSERT_EQ(static_cast<ssize_t>(bufsize), read(sp[0], buf, bufsize))
1522 << strerror(errno);
1523 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
1524
1525 close(sp[1]);
1526 close(sp[0]);
1527 leak(fd);
1528 }
1529
1530 /* sendfile should fail gracefully if fuse declines the read */
1531 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236466 */
TEST_F(Read,sendfile_eio)1532 TEST_F(Read, sendfile_eio)
1533 {
1534 const char FULLPATH[] = "mountpoint/some_file.txt";
1535 const char RELPATH[] = "some_file.txt";
1536 const char *CONTENTS = "abcdefgh";
1537 uint64_t ino = 42;
1538 int fd;
1539 ssize_t bufsize = strlen(CONTENTS);
1540 int sp[2];
1541 off_t sbytes;
1542
1543 expect_lookup(RELPATH, ino, bufsize);
1544 expect_open(ino, 0, 1);
1545 EXPECT_CALL(*m_mock, process(
1546 ResultOf([=](auto in) {
1547 return (in.header.opcode == FUSE_READ);
1548 }, Eq(true)),
1549 _)
1550 ).WillOnce(Invoke(ReturnErrno(EIO)));
1551
1552 ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
1553 << strerror(errno);
1554 fd = open(FULLPATH, O_RDONLY);
1555 ASSERT_LE(0, fd) << strerror(errno);
1556
1557 ASSERT_NE(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0));
1558
1559 close(sp[1]);
1560 close(sp[0]);
1561 leak(fd);
1562 }
1563
/*
 * Sequential reads should use readahead. And if allowed, large reads should
 * be clustered.
 */
TEST_P(ReadAhead, readahead) {
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd, maxcontig, clustersize;
	ssize_t bufsize = 4 * m_maxbcachebuf;
	ssize_t filesize = bufsize;
	uint64_t len;
	char *rbuf, *contents;
	off_t offs;

	contents = new char[filesize];
	memset(contents, 'X', filesize);
	rbuf = new char[bufsize]();

	expect_lookup(RELPATH, ino, filesize);
	expect_open(ino, 0, 1);
	/*
	 * Compute the expected size of each FUSE_READ: one cache block when
	 * clustering is disabled (m_noclusterr), otherwise a cache block
	 * plus the configured readahead, capped at m_maxphys.
	 */
	maxcontig = m_noclusterr ? m_maxbcachebuf :
		m_maxbcachebuf + m_maxreadahead;
	clustersize = MIN((unsigned long )maxcontig, m_maxphys);
	for (offs = 0; offs < bufsize; offs += clustersize) {
		/* The final request may be shorter than a full cluster */
		len = std::min((size_t)clustersize, (size_t)(filesize - offs));
		expect_read(ino, offs, len, len, contents + offs);
	}

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Set the internal readahead counter to a "large" value */
	ASSERT_EQ(0, fcntl(fd, F_READAHEAD, 1'000'000'000)) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, rbuf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(rbuf, contents, bufsize));

	leak(fd);
	delete[] rbuf;
	delete[] contents;
}
1606
/*
 * Parameter tuples are (bool, int); presumably (noclusterr, readahead
 * multiplier) consumed by the ReadAhead fixture's SetUp — confirm against
 * the fixture's definition.
 */
INSTANTIATE_TEST_SUITE_P(RA, ReadAhead,
	Values(tuple<bool, int>(false, 0),
	       tuple<bool, int>(false, 1),
	       tuple<bool, int>(false, 2),
	       tuple<bool, int>(false, 3),
	       tuple<bool, int>(true, 0),
	       tuple<bool, int>(true, 1),
	       tuple<bool, int>(true, 2)));
1615
1616 /* With read-only mounts, fuse should never update atime during close */
TEST_F(RofsRead,atime_during_close)1617 TEST_F(RofsRead, atime_during_close)
1618 {
1619 const char FULLPATH[] = "mountpoint/some_file.txt";
1620 const char RELPATH[] = "some_file.txt";
1621 const char *CONTENTS = "abcdefgh";
1622 uint64_t ino = 42;
1623 int fd;
1624 ssize_t bufsize = strlen(CONTENTS);
1625 uint8_t buf[bufsize];
1626
1627 expect_lookup(RELPATH, ino, bufsize);
1628 expect_open(ino, 0, 1);
1629 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
1630 EXPECT_CALL(*m_mock, process(
1631 ResultOf([&](auto in) {
1632 return (in.header.opcode == FUSE_SETATTR);
1633 }, Eq(true)),
1634 _)
1635 ).Times(0);
1636 expect_flush(ino, 1, ReturnErrno(0));
1637 expect_release(ino, FuseTest::FH);
1638
1639 fd = open(FULLPATH, O_RDONLY);
1640 ASSERT_LE(0, fd) << strerror(errno);
1641
1642 /* Ensure atime will be different than during lookup */
1643 nap();
1644
1645 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
1646
1647 close(fd);
1648 }
1649
/* fuse_init_out.time_gran controls the granularity of timestamps */
TEST_P(TimeGran, atime_during_setattr)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];
	uint64_t ino = 42;
	const mode_t newmode = 0755;
	int fd;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	/*
	 * The fchmod below triggers a FUSE_SETATTR that carries both the new
	 * mode and an atime (presumably dirtied by the preceding read).
	 * Whatever atime the kernel sends must be rounded to a multiple of
	 * the mount's time granularity.
	 */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			uint32_t valid = FATTR_MODE | FATTR_ATIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid &&
				in.body.setattr.atimensec % m_time_gran == 0);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | newmode;
	})));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Read first, so that there is an atime to flush */
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	/* Trigger the FUSE_SETATTR checked above */
	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);

	leak(fd);
}
1688
/* Run TimeGran with parameter values 0 through 9 */
INSTANTIATE_TEST_SUITE_P(TG, TimeGran, Range(0u, 10u));
1690