1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2019 The FreeBSD Foundation
5 *
6 * This software was developed by BFF Storage Systems, LLC under sponsorship
7 * from the FreeBSD Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 extern "C" {
32 #include <sys/types.h>
33
34 #include <fcntl.h>
35 #include <pthread.h>
36 }
37
38 #include "mockfs.hh"
39 #include "utils.hh"
40
41 using namespace testing;
42
43 /*
 * FUSE asynchronous notification
45 *
46 * FUSE servers can send unprompted notification messages for things like cache
47 * invalidation. This file tests our client's handling of those messages.
48 */
49
50 class Notify: public FuseTest,
51 public WithParamInterface<int>
52 {
53 public:
SetUp()54 virtual void SetUp() {
55 m_init_flags |= GetParam();
56 FuseTest::SetUp();
57 }
58
59 /* Ignore an optional FUSE_FSYNC */
maybe_expect_fsync(uint64_t ino)60 void maybe_expect_fsync(uint64_t ino)
61 {
62 EXPECT_CALL(*m_mock, process(
63 ResultOf([=](auto in) {
64 return (in.header.opcode == FUSE_FSYNC &&
65 in.header.nodeid == ino);
66 }, Eq(true)),
67 _)
68 ).WillOnce(Invoke(ReturnErrno(0)));
69 }
70
expect_lookup(uint64_t parent,const char * relpath,uint64_t ino,off_t size,Sequence & seq)71 void expect_lookup(uint64_t parent, const char *relpath, uint64_t ino,
72 off_t size, Sequence &seq)
73 {
74 EXPECT_LOOKUP(parent, relpath)
75 .InSequence(seq)
76 .WillOnce(Invoke(
77 ReturnImmediate([=](auto in __unused, auto& out) {
78 SET_OUT_HEADER_LEN(out, entry);
79 out.body.entry.attr.mode = S_IFREG | 0644;
80 out.body.entry.nodeid = ino;
81 out.body.entry.attr.ino = ino;
82 out.body.entry.attr.nlink = 1;
83 out.body.entry.attr.size = size;
84 out.body.entry.attr_valid = UINT64_MAX;
85 out.body.entry.entry_valid = UINT64_MAX;
86 })));
87 }
88 };
89
/*
 * Fixture for notification tests with the writeback cache enabled, so dirty
 * data may be cached client-side when an invalidation arrives.
 */
class NotifyWriteback: public Notify {
public:
	/* Enable writeback caching and async I/O before mounting. */
	virtual void SetUp() {
		m_init_flags |= FUSE_WRITEBACK_CACHE;
		m_async = true;
		Notify::SetUp();
		if (IsSkipped())
			return;
	}

	/* Expect a single full-success FUSE_WRITE of contents at offset. */
	void expect_write(uint64_t ino, uint64_t offset, uint64_t size,
		const void *contents)
	{
		FuseTest::expect_write(ino, offset, size, size, 0, 0, contents);
	}

};
107
/* Arguments for the inval_entry() thread, passed through pthread_create */
struct inval_entry_args {
	MockFS *mock;		/* mock server that sends the notification */
	ino_t parent;		/* inode of the parent directory */
	const char *name;	/* name of the dentry to invalidate */
	size_t namelen;		/* length of name, excluding the NUL */
};
114
inval_entry(void * arg)115 static void* inval_entry(void* arg) {
116 const struct inval_entry_args *iea = (struct inval_entry_args*)arg;
117 ssize_t r;
118
119 r = iea->mock->notify_inval_entry(iea->parent, iea->name, iea->namelen);
120 if (r >= 0)
121 return 0;
122 else
123 return (void*)(intptr_t)errno;
124 }
125
/* Arguments for the inval_inode() thread, passed through pthread_create */
struct inval_inode_args {
	MockFS *mock;	/* mock server that sends the notification */
	ino_t ino;	/* inode whose cache to invalidate */
	off_t off;	/* start of the range; -1 used by tests to purge
			 * attributes only — confirm against MockFS */
	ssize_t len;	/* length of the range to invalidate */
};
132
/* Arguments for the store() thread, passed through pthread_create */
struct store_args {
	MockFS *mock;		/* mock server that sends the notification */
	ino_t nodeid;		/* inode whose cache to update */
	off_t offset;		/* file offset of the stored data */
	ssize_t size;		/* size of the stored data in bytes */
	const void* data;	/* data to push into the client's cache */
};
140
inval_inode(void * arg)141 static void* inval_inode(void* arg) {
142 const struct inval_inode_args *iia = (struct inval_inode_args*)arg;
143 ssize_t r;
144
145 r = iia->mock->notify_inval_inode(iia->ino, iia->off, iia->len);
146 if (r >= 0)
147 return 0;
148 else
149 return (void*)(intptr_t)errno;
150 }
151
store(void * arg)152 static void* store(void* arg) {
153 const struct store_args *sa = (struct store_args*)arg;
154 ssize_t r;
155
156 r = sa->mock->notify_store(sa->nodeid, sa->offset, sa->data, sa->size);
157 if (r >= 0)
158 return 0;
159 else
160 return (void*)(intptr_t)errno;
161 }
162
163 /* Invalidate a nonexistent entry */
TEST_P(Notify,inval_entry_nonexistent)164 TEST_P(Notify, inval_entry_nonexistent)
165 {
166 const static char *name = "foo";
167 struct inval_entry_args iea;
168 void *thr0_value;
169 pthread_t th0;
170
171 iea.mock = m_mock;
172 iea.parent = FUSE_ROOT_ID;
173 iea.name = name;
174 iea.namelen = strlen(name);
175 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
176 << strerror(errno);
177 pthread_join(th0, &thr0_value);
178 /* It's not an error for an entry to not be cached */
179 EXPECT_EQ(0, (intptr_t)thr0_value);
180 }
181
/* Invalidate a cached entry; a subsequent lookup must go to the server */
TEST_P(Notify, inval_entry)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	struct inval_entry_args iea;
	struct stat sb;
	void *thr0_value;
	uint64_t ino0 = 42;
	uint64_t ino1 = 43;
	Sequence seq;
	pthread_t th0;

	/*
	 * Queue two lookup replies for the same name: the inode to cache,
	 * then the inode the server will report after invalidation.
	 */
	expect_lookup(FUSE_ROOT_ID, RELPATH, ino0, 0, seq);
	expect_lookup(FUSE_ROOT_ID, RELPATH, ino1, 0, seq);

	/* Fill the entry cache */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino0, sb.st_ino);

	/* Now invalidate the entry */
	iea.mock = m_mock;
	iea.parent = FUSE_ROOT_ID;
	iea.name = RELPATH;
	iea.namelen = strlen(RELPATH);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* The second lookup should return the alternate ino */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino1, sb.st_ino);
}
216
/*
 * Invalidate a cached entry beneath the root, which uses a slightly different
 * code path.
 */
TEST_P(Notify, inval_entry_below_root)
{
	const static char FULLPATH[] = "mountpoint/some_dir/foo";
	const static char DNAME[] = "some_dir";
	const static char FNAME[] = "foo";
	struct inval_entry_args iea;
	struct stat sb;
	void *thr0_value;
	uint64_t dir_ino = 41;
	uint64_t ino0 = 42;
	uint64_t ino1 = 43;
	Sequence seq;
	pthread_t th0;

	/* Look up the intermediate directory; cached forever */
	EXPECT_LOOKUP(FUSE_ROOT_ID, DNAME)
	.WillOnce(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, entry);
		out.body.entry.attr.mode = S_IFDIR | 0755;
		out.body.entry.nodeid = dir_ino;
		out.body.entry.attr.nlink = 2;
		out.body.entry.attr_valid = UINT64_MAX;
		out.body.entry.entry_valid = UINT64_MAX;
	})));
	/* First lookup of foo caches ino0; after invalidation, return ino1 */
	expect_lookup(dir_ino, FNAME, ino0, 0, seq);
	expect_lookup(dir_ino, FNAME, ino1, 0, seq);

	/* Fill the entry cache */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino0, sb.st_ino);

	/* Now invalidate the entry */
	iea.mock = m_mock;
	iea.parent = dir_ino;
	iea.name = FNAME;
	iea.namelen = strlen(FNAME);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* The second lookup should return the alternate ino */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(ino1, sb.st_ino);
}
266
/* Invalidating an entry invalidates the parent directory's attributes */
TEST_P(Notify, inval_entry_invalidates_parent_attrs)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	struct inval_entry_args iea;
	struct stat sb;
	void *thr0_value;
	uint64_t ino = 42;
	Sequence seq;
	pthread_t th0;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
	/*
	 * Expect exactly two FUSE_GETATTRs for the root: one to fill its
	 * attribute cache, and a second one after invalidation purges it.
	 * Since attr_valid is UINT64_MAX, the final stat() could only cause
	 * a second GETATTR if the cache really was purged.
	 */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == FUSE_ROOT_ID);
		}, Eq(true)),
		_)
	).Times(2)
	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.mode = S_IFDIR | 0755;
		out.body.attr.attr_valid = UINT64_MAX;
	})));

	/* Fill the attr and entry cache */
	ASSERT_EQ(0, stat("mountpoint", &sb)) << strerror(errno);
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);

	/* Now invalidate the entry */
	iea.mock = m_mock;
	iea.parent = FUSE_ROOT_ID;
	iea.name = RELPATH;
	iea.namelen = strlen(RELPATH);
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* /'s attribute cache should be cleared */
	ASSERT_EQ(0, stat("mountpoint", &sb)) << strerror(errno);
}
310
311
/* Invalidate an inode that the kernel has never cached */
TEST_P(Notify, inval_inode_nonexistent)
{
	struct inval_inode_args args;
	ino_t ino = 42;
	pthread_t thr;
	void *retval;

	args.mock = m_mock;
	args.ino = ino;
	args.off = 0;
	args.len = 0;
	ASSERT_EQ(0, pthread_create(&thr, NULL, inval_inode, &args))
		<< strerror(errno);
	pthread_join(thr, &retval);
	/* It's not an error for an inode to not be cached */
	EXPECT_EQ(0, (intptr_t)retval);
}
329
/*
 * Invalidating an inode with clean cached data evicts both the data and the
 * attribute caches; subsequent stat() and read() must query the server.
 */
TEST_P(Notify, inval_inode_with_clean_cache)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS0[] = "abcdefgh";
	const char CONTENTS1[] = "ijklmnopqrstuvwxyz";
	struct inval_inode_args iia;
	struct stat sb;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	uid_t uid = 12345;
	pthread_t th0;
	ssize_t size0 = sizeof(CONTENTS0);
	ssize_t size1 = sizeof(CONTENTS1);
	char buf[80];
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, size0, seq);
	expect_open(ino, 0, 1);
	/*
	 * After invalidation the kernel must refresh attributes; the server
	 * now reports a new size and uid.
	 */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.mode = S_IFREG | 0644;
		out.body.attr.attr_valid = UINT64_MAX;
		out.body.attr.attr.size = size1;
		out.body.attr.attr.uid = uid;
	})));
	/* One read of the old contents, one of the new after invalidation */
	expect_read(ino, 0, size0, size0, CONTENTS0);
	expect_read(ino, 0, size1, size1, CONTENTS1);

	/* Fill the data cache */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(size0, read(fd, buf, size0)) << strerror(errno);
	EXPECT_EQ(0, memcmp(buf, CONTENTS0, size0));

	/* Evict the data cache */
	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = 0;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* cached attributes were purged; this will trigger a new GETATTR */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(uid, sb.st_uid);
	EXPECT_EQ(size1, sb.st_size);

	/* This read should not be serviced by cache */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(size1, read(fd, buf, size1)) << strerror(errno);
	EXPECT_EQ(0, memcmp(buf, CONTENTS1, size1));

	leak(fd);
}
394
/*
 * Attempting to invalidate an entry or inode after unmounting should fail, but
 * nothing bad should happen.
 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=290519
 */
TEST_P(Notify, notify_after_unmount)
{
	const static char *name = "foo";
	struct inval_entry_args iea;

	expect_destroy(0);

	m_mock->unmount();

	iea.mock = m_mock;
	iea.parent = FUSE_ROOT_ID;
	iea.name = name;
	iea.namelen = strlen(name);
	/*
	 * The final argument is presumably the errno the notification is
	 * expected to fail with now that the fs is unmounted — confirm
	 * against MockFS::notify_inval_entry.
	 */
	iea.mock->notify_inval_entry(iea.parent, iea.name, iea.namelen, ENODEV);
}
415
416 /* FUSE_NOTIFY_STORE with a file that's not in the entry cache */
417 /* disabled because FUSE_NOTIFY_STORE is not yet implemented */
TEST_P(Notify,DISABLED_store_nonexistent)418 TEST_P(Notify, DISABLED_store_nonexistent)
419 {
420 struct store_args sa;
421 ino_t ino = 42;
422 void *thr0_value;
423 pthread_t th0;
424
425 sa.mock = m_mock;
426 sa.nodeid = ino;
427 sa.offset = 0;
428 sa.size = 0;
429 ASSERT_EQ(0, pthread_create(&th0, NULL, store, &sa)) << strerror(errno);
430 pthread_join(th0, &thr0_value);
431 /* It's not an error for a file to be unknown to the kernel */
432 EXPECT_EQ(0, (intptr_t)thr0_value);
433 }
434
/* Store data for a file that does not yet have anything cached */
/* disabled because FUSE_NOTIFY_STORE is not yet implemented */
TEST_P(Notify, DISABLED_store_with_blank_cache)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS1[] = "ijklmnopqrstuvwxyz";
	struct store_args sa;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	pthread_t th0;
	ssize_t size1 = sizeof(CONTENTS1);
	char buf[80];
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, size1, seq);
	expect_open(ino, 0, 1);

	/* Open the file; nothing is cached yet */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Push data directly into the client's data cache */
	sa.mock = m_mock;
	sa.nodeid = ino;
	sa.offset = 0;
	sa.size = size1;
	sa.data = (const void*)CONTENTS1;
	ASSERT_EQ(0, pthread_create(&th0, NULL, store, &sa)) << strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/*
	 * This read should be serviced by cache; no FUSE_READ expectation
	 * was registered above, so a server round trip would fail the test.
	 */
	ASSERT_EQ(size1, read(fd, buf, size1)) << strerror(errno);
	EXPECT_EQ(0, memcmp(buf, CONTENTS1, size1));

	leak(fd);
}
474
/*
 * Invalidating an inode with dirty writeback-cached data must flush that
 * data to the server (FUSE_WRITE) before discarding the cache.
 */
TEST_P(NotifyWriteback, inval_inode_with_dirty_cache)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS[] = "abcdefgh";
	struct inval_inode_args iia;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	pthread_t th0;
	ssize_t bufsize = sizeof(CONTENTS);
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
	expect_open(ino, 0, 1);

	/* Fill the data cache */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	/* The dirty data must be written back during invalidation */
	expect_write(ino, 0, bufsize, CONTENTS);
	/*
	 * The FUSE protocol does not require an fsync here, but FreeBSD's
	 * bufobj_invalbuf sends it anyway
	 */
	maybe_expect_fsync(ino);

	/* Evict the data cache */
	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = 0;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	leak(fd);
}
515
/*
 * Invalidating attributes only (off = -1) must purge the attribute cache
 * without flushing or discarding the dirty data cache.
 */
TEST_P(NotifyWriteback, inval_inode_attrs_only)
{
	const static char FULLPATH[] = "mountpoint/foo";
	const static char RELPATH[] = "foo";
	const char CONTENTS[] = "abcdefgh";
	struct inval_inode_args iia;
	struct stat sb;
	uid_t uid = 12345;
	ino_t ino = 42;
	void *thr0_value;
	Sequence seq;
	pthread_t th0;
	ssize_t bufsize = sizeof(CONTENTS);
	int fd;

	expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
	expect_open(ino, 0, 1);
	/* The dirty data must NOT be written back */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE);
		}, Eq(true)),
		_)
	).Times(0);
	/* A single GETATTR refills the purged attribute cache */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.mode = S_IFREG | 0644;
		out.body.attr.attr_valid = UINT64_MAX;
		out.body.attr.attr.size = bufsize;
		out.body.attr.attr.uid = uid;
	})));

	/* Fill the data cache */
	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	/* Evict the attributes, but not data cache */
	iia.mock = m_mock;
	iia.ino = ino;
	iia.off = -1;
	iia.len = 0;
	ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
		<< strerror(errno);
	pthread_join(th0, &thr0_value);
	EXPECT_EQ(0, (intptr_t)thr0_value);

	/* cached attributes were purged; this will trigger a new GETATTR */
	ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
	EXPECT_EQ(uid, sb.st_uid);
	EXPECT_EQ(bufsize, sb.st_size);

	leak(fd);
}
575
576 /*
577 * Attempting asynchronous invalidation of an Entry before mounting the file
578 * system should fail, but nothing bad should happen.
579 *
580 * Note that invalidating an inode before mount goes through the same path, and
581 * is not separately tested.
582 *
583 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=290519
584 */
TEST(PreMount,inval_entry_before_mount)585 TEST(PreMount, inval_entry_before_mount)
586 {
587 const static char name[] = "foo";
588 size_t namelen = strlen(name);
589 struct mockfs_buf_out *out;
590 int r;
591 int fuse_fd;
592
593 fuse_fd = open("/dev/fuse", O_CLOEXEC | O_RDWR);
594 ASSERT_GE(fuse_fd, 0) << strerror(errno);
595
596 out = new mockfs_buf_out;
597 out->header.unique = 0; /* 0 means asynchronous notification */
598 out->header.error = FUSE_NOTIFY_INVAL_ENTRY;
599 out->body.inval_entry.parent = FUSE_ROOT_ID;
600 out->body.inval_entry.namelen = namelen;
601 strlcpy((char*)&out->body.bytes + sizeof(out->body.inval_entry),
602 name, sizeof(out->body.bytes) - sizeof(out->body.inval_entry));
603 out->header.len = sizeof(out->header) + sizeof(out->body.inval_entry) +
604 namelen;
605 r = write(fuse_fd, out, out->header.len);
606 EXPECT_EQ(-1, r);
607 EXPECT_EQ(ENODEV, errno);
608 delete out;
609 }
610
/*
 * Run every test in both suites with and without async reads, because that
 * flag affects the type of vnode lock acquired in
 * fuse_internal_invalidate_entry.
 */
INSTANTIATE_TEST_SUITE_P(N, Notify, Values(0, FUSE_ASYNC_READ));
INSTANTIATE_TEST_SUITE_P(N, NotifyWriteback, Values(0, FUSE_ASYNC_READ));
617