Lines Matching +full:t +full:- +full:head

1 // SPDX-License-Identifier: GPL-2.0
39 * than two, then a write to a non-empty pipe may block even if the pipe is not
45 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
51 * The max size that a non-root user is allowed to grow the pipe. Can
52 * be set by root in /proc/sys/fs/pipe-max-size
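The limit described at lines 51-52 is what an unprivileged fcntl(F_SETPIPE_SZ) caller runs into. A minimal userspace sketch (hypothetical demo, error handling trimmed) of querying and growing a pipe's capacity:

/* Hypothetical demo: grow a pipe, subject to /proc/sys/fs/pipe-max-size. */
#define _GNU_SOURCE          /* for F_GETPIPE_SZ / F_SETPIPE_SZ */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fds[2];

    if (pipe(fds) < 0)
        return 1;

    /* Current capacity in bytes (a multiple of the page size). */
    int cur = fcntl(fds[1], F_GETPIPE_SZ);

    /* Request 1 MiB; the kernel rounds up to a power-of-two number of
     * pages.  Without CAP_SYS_RESOURCE this fails with EPERM once the
     * request exceeds /proc/sys/fs/pipe-max-size. */
    int grown = fcntl(fds[1], F_SETPIPE_SZ, 1024 * 1024);

    printf("capacity: %d -> %d bytes\n", cur, grown);
    close(fds[0]);
    close(fds[1]);
    return 0;
}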
63 * We use head and tail indices that aren't masked off, except at the point of
65 * isn't a dead spot in the buffer, but the ring has to be a power of two and
67 * -- David Howells 2019-09-23.
70 * -- Julian Bradfield 1999-06-07.
73 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
76 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
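The comment at lines 63-67 is the key to reading the rest of the file: head and tail are free-running counters, compared and subtracted directly, and masked only when indexing pipe->bufs. A condensed sketch of that arithmetic, mirroring the pipe_empty()/pipe_occupancy()/pipe_full() helpers from include/linux/pipe_fs_i.h:

/* Sketch of the unmasked-index scheme; not the kernel header itself. */
#include <stdbool.h>
#include <stdio.h>

static inline bool pipe_empty(unsigned int head, unsigned int tail)
{
    return head == tail;
}

static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
{
    return head - tail;                    /* unsigned wrap keeps this right */
}

static inline bool pipe_full(unsigned int head, unsigned int tail,
                             unsigned int limit)
{
    return pipe_occupancy(head, tail) >= limit;
}

int main(void)
{
    unsigned int ring_size = 16;           /* must be a power of two     */
    unsigned int tail = 0xfffffffeu;       /* free-running counters...   */
    unsigned int head = tail + 3;          /* ...may wrap past zero      */

    /* Occupancy stays correct across the wrap; masking happens only at
     * the point of dereference, which is why ring_size must be 2^n. */
    printf("occupancy=%u empty=%d full=%d slot=%u\n",
           pipe_occupancy(head, tail),
           pipe_empty(head, tail),
           pipe_full(head, tail, ring_size),
           tail & (ring_size - 1));
    return 0;
}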
81 if (pipe->files) in pipe_lock_nested()
82 mutex_lock_nested(&pipe->mutex, subclass); in pipe_lock_nested()
88 * pipe_lock() nests non-pipe inode locks (for writing to a file) in pipe_lock()
96 if (pipe->files) in pipe_unlock()
97 mutex_unlock(&pipe->mutex); in pipe_unlock()
103 mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT); in __pipe_lock()
108 mutex_unlock(&pipe->mutex); in __pipe_unlock()
128 struct page *page = buf->page; in anon_pipe_buf_release()
131 * If nobody else uses this page, and we don't already have a in anon_pipe_buf_release()
132 * temporary page, let's keep track of it as a one-deep in anon_pipe_buf_release()
135 if (page_count(page) == 1 && !pipe->tmp_page) in anon_pipe_buf_release()
136 pipe->tmp_page = page; in anon_pipe_buf_release()
144 struct page *page = buf->page; in anon_pipe_buf_try_steal()
154 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
168 struct page *page = buf->page; in generic_pipe_buf_try_steal()
184 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
195 return try_get_page(buf->page); in generic_pipe_buf_get()
200 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
210 put_page(buf->page); in generic_pipe_buf_release()
220 /* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
223 unsigned int head = READ_ONCE(pipe->head); in pipe_readable() local
224 unsigned int tail = READ_ONCE(pipe->tail); in pipe_readable()
225 unsigned int writers = READ_ONCE(pipe->writers); in pipe_readable()
227 return !pipe_empty(head, tail) || !writers; in pipe_readable()
242 spin_lock_irq(&pipe->rd_wait.lock); in pipe_update_tail()
244 if (buf->flags & PIPE_BUF_FLAG_LOSS) in pipe_update_tail()
245 pipe->note_loss = true; in pipe_update_tail()
247 pipe->tail = ++tail; in pipe_update_tail()
248 spin_unlock_irq(&pipe->rd_wait.lock); in pipe_update_tail()
254 * without the spinlock - the mutex is enough. in pipe_update_tail()
256 pipe->tail = ++tail; in pipe_update_tail()
264 struct file *filp = iocb->ki_filp; in pipe_read()
265 struct pipe_inode_info *pipe = filp->private_data; in pipe_read()
284 was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage); in pipe_read()
286 /* Read ->head with a barrier vs post_one_notification() */ in pipe_read()
287 unsigned int head = smp_load_acquire(&pipe->head); in pipe_read() local
288 unsigned int tail = pipe->tail; in pipe_read()
289 unsigned int mask = pipe->ring_size - 1; in pipe_read()
292 if (pipe->note_loss) { in pipe_read()
297 ret = -ENOBUFS; in pipe_read()
306 ret = -EFAULT; in pipe_read()
310 total_len -= sizeof(n); in pipe_read()
311 pipe->note_loss = false; in pipe_read()
315 if (!pipe_empty(head, tail)) { in pipe_read()
316 struct pipe_buffer *buf = &pipe->bufs[tail & mask]; in pipe_read()
317 size_t chars = buf->len; in pipe_read()
322 if (buf->flags & PIPE_BUF_FLAG_WHOLE) { in pipe_read()
324 ret = -ENOBUFS; in pipe_read()
337 written = copy_page_to_iter(buf->page, buf->offset, chars, to); in pipe_read()
340 ret = -EFAULT; in pipe_read()
344 buf->offset += chars; in pipe_read()
345 buf->len -= chars; in pipe_read()
348 if (buf->flags & PIPE_BUF_FLAG_PACKET) { in pipe_read()
350 buf->len = 0; in pipe_read()
353 if (!buf->len) in pipe_read()
355 total_len -= chars; in pipe_read()
358 if (!pipe_empty(head, tail)) /* More to do? */ in pipe_read()
362 if (!pipe->writers) in pipe_read()
366 if ((filp->f_flags & O_NONBLOCK) || in pipe_read()
367 (iocb->ki_flags & IOCB_NOWAIT)) { in pipe_read()
368 ret = -EAGAIN; in pipe_read()
374 * We only get here if we didn't actually read anything. in pipe_read()
376 * However, we could have seen (and removed) a zero-sized in pipe_read()
380 * You can't make zero-sized pipe buffers by doing an empty in pipe_read()
391 wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM); in pipe_read()
392 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); in pipe_read()
395 * But because we didn't read anything, at this point we can in pipe_read()
396 * just return directly with -ERESTARTSYS if we're interrupted, in pipe_read()
400 if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0) in pipe_read()
401 return -ERESTARTSYS; in pipe_read()
404 was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage); in pipe_read()
407 if (pipe_empty(pipe->head, pipe->tail)) in pipe_read()
412 wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM); in pipe_read()
414 wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); in pipe_read()
415 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); in pipe_read()
423 return (file->f_flags & O_DIRECT) != 0; in is_packetized()
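is_packetized() at line 423 is what turns an O_DIRECT pipe into a packet pipe: each write gets its own buffer flagged PIPE_BUF_FLAG_PACKET (line 545), and a read consumes at most one packet, discarding any unread remainder (the buf->len = 0 at line 350). A small userspace illustration (hypothetical demo, error handling trimmed):

/* Hypothetical demo of packet-mode pipes. */
#define _GNU_SOURCE          /* for O_DIRECT with pipe2() */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fds[2];
    char buf[64];

    if (pipe2(fds, O_DIRECT) < 0)
        return 1;

    write(fds[1], "first packet", 12);
    write(fds[1], "second", 6);

    /* Each read returns at most one packet, even though buf has room for
     * both writes; a short read would discard the packet's remainder. */
    ssize_t n = read(fds[0], buf, sizeof(buf));
    printf("got %zd bytes: %.*s\n", n, (int)n, buf);   /* 12, "first packet" */

    n = read(fds[0], buf, sizeof(buf));
    printf("got %zd bytes: %.*s\n", n, (int)n, buf);   /* 6, "second" */
    return 0;
}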
426 /* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
429 unsigned int head = READ_ONCE(pipe->head); in pipe_writable() local
430 unsigned int tail = READ_ONCE(pipe->tail); in pipe_writable()
431 unsigned int max_usage = READ_ONCE(pipe->max_usage); in pipe_writable()
433 return !pipe_full(head, tail, max_usage) || in pipe_writable()
434 !READ_ONCE(pipe->readers); in pipe_writable()
440 struct file *filp = iocb->ki_filp; in pipe_write()
441 struct pipe_inode_info *pipe = filp->private_data; in pipe_write()
442 unsigned int head; in pipe_write() local
456 * since we don't actually need that, it's simpler to just bail here. in pipe_write()
459 return -EXDEV; in pipe_write()
467 if (!pipe->readers) { in pipe_write()
469 ret = -EPIPE; in pipe_write()
474 * If it wasn't empty we try to merge new data into in pipe_write()
478 * page-aligns the rest of the writes for large writes in pipe_write()
481 head = pipe->head; in pipe_write()
482 was_empty = pipe_empty(head, pipe->tail); in pipe_write()
483 chars = total_len & (PAGE_SIZE-1); in pipe_write()
485 unsigned int mask = pipe->ring_size - 1; in pipe_write()
486 struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask]; in pipe_write()
487 int offset = buf->offset + buf->len; in pipe_write()
489 if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) && in pipe_write()
495 ret = copy_page_from_iter(buf->page, offset, chars, from); in pipe_write()
497 ret = -EFAULT; in pipe_write()
501 buf->len += ret; in pipe_write()
508 if (!pipe->readers) { in pipe_write()
511 ret = -EPIPE; in pipe_write()
515 head = pipe->head; in pipe_write()
516 if (!pipe_full(head, pipe->tail, pipe->max_usage)) { in pipe_write()
517 unsigned int mask = pipe->ring_size - 1; in pipe_write()
519 struct page *page = pipe->tmp_page; in pipe_write()
525 ret = ret ? : -ENOMEM; in pipe_write()
528 pipe->tmp_page = page; in pipe_write()
536 pipe->head = head + 1; in pipe_write()
539 buf = &pipe->bufs[head & mask]; in pipe_write()
540 buf->page = page; in pipe_write()
541 buf->ops = &anon_pipe_buf_ops; in pipe_write()
542 buf->offset = 0; in pipe_write()
543 buf->len = 0; in pipe_write()
545 buf->flags = PIPE_BUF_FLAG_PACKET; in pipe_write()
547 buf->flags = PIPE_BUF_FLAG_CAN_MERGE; in pipe_write()
548 pipe->tmp_page = NULL; in pipe_write()
553 ret = -EFAULT; in pipe_write()
557 buf->len = copied; in pipe_write()
563 if (!pipe_full(head, pipe->tail, pipe->max_usage)) in pipe_write()
567 if ((filp->f_flags & O_NONBLOCK) || in pipe_write()
568 (iocb->ki_flags & IOCB_NOWAIT)) { in pipe_write()
570 ret = -EAGAIN; in pipe_write()
575 ret = -ERESTARTSYS; in pipe_write()
582 * after waiting we need to re-check whether the pipe in pipe_write()
587 wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); in pipe_write()
588 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); in pipe_write()
589 wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe)); in pipe_write()
591 was_empty = pipe_empty(pipe->head, pipe->tail); in pipe_write()
595 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) in pipe_write()
611 if (was_empty || pipe->poll_usage) in pipe_write()
612 wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); in pipe_write()
613 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); in pipe_write()
615 wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM); in pipe_write()
616 if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) { in pipe_write()
620 sb_end_write(file_inode(filp)->i_sb); in pipe_write()
627 struct pipe_inode_info *pipe = filp->private_data; in pipe_ioctl()
628 unsigned int count, head, tail, mask; in pipe_ioctl() local
634 head = pipe->head; in pipe_ioctl()
635 tail = pipe->tail; in pipe_ioctl()
636 mask = pipe->ring_size - 1; in pipe_ioctl()
638 while (tail != head) { in pipe_ioctl()
639 count += pipe->bufs[tail & mask].len; in pipe_ioctl()
661 return -ENOIOCTLCMD; in pipe_ioctl()
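The FIONREAD path in pipe_ioctl() (lines 634-639) walks the occupied slots and sums buf->len; from userspace that answers "how many bytes are queued". A small sketch (hypothetical demo, error handling trimmed):

/* Hypothetical demo: FIONREAD on a pipe reports unread bytes. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
    int fds[2], avail = 0;

    if (pipe(fds) < 0)
        return 1;

    write(fds[1], "hello", 5);
    ioctl(fds[0], FIONREAD, &avail);     /* walks the ring as shown above */
    printf("%d bytes queued\n", avail);  /* prints 5 */
    return 0;
}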
665 /* No kernel lock held - fine */
670 struct pipe_inode_info *pipe = filp->private_data; in pipe_poll()
671 unsigned int head, tail; in pipe_poll() local
674 WRITE_ONCE(pipe->poll_usage, true); in pipe_poll()
677 * Reading pipe state only -- no need for acquiring the semaphore. in pipe_poll()
682 if (filp->f_mode & FMODE_READ) in pipe_poll()
683 poll_wait(filp, &pipe->rd_wait, wait); in pipe_poll()
684 if (filp->f_mode & FMODE_WRITE) in pipe_poll()
685 poll_wait(filp, &pipe->wr_wait, wait); in pipe_poll()
692 head = READ_ONCE(pipe->head); in pipe_poll()
693 tail = READ_ONCE(pipe->tail); in pipe_poll()
696 if (filp->f_mode & FMODE_READ) { in pipe_poll()
697 if (!pipe_empty(head, tail)) in pipe_poll()
699 if (!pipe->writers && filp->f_version != pipe->w_counter) in pipe_poll()
703 if (filp->f_mode & FMODE_WRITE) { in pipe_poll()
704 if (!pipe_full(head, tail, pipe->max_usage)) in pipe_poll()
710 if (!pipe->readers) in pipe_poll()
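pipe_poll() maps the same head/tail state to poll bits: readable when the ring is non-empty, writable when it is not full, EPOLLHUP once the writers are gone and EPOLLERR once the readers are. A userspace sketch of checking both ends (hypothetical demo, error handling trimmed):

/* Hypothetical demo: poll both ends of a pipe. */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fds[2];

    if (pipe(fds) < 0)
        return 1;
    write(fds[1], "x", 1);

    struct pollfd pfd[2] = {
        { .fd = fds[0], .events = POLLIN  },   /* read end  */
        { .fd = fds[1], .events = POLLOUT },   /* write end */
    };

    poll(pfd, 2, 0);
    printf("read end:  POLLIN=%d\n",  !!(pfd[0].revents & POLLIN));  /* 1: not empty */
    printf("write end: POLLOUT=%d\n", !!(pfd[1].revents & POLLOUT)); /* 1: not full  */
    return 0;
}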
721 spin_lock(&inode->i_lock); in put_pipe_info()
722 if (!--pipe->files) { in put_pipe_info()
723 inode->i_pipe = NULL; in put_pipe_info()
726 spin_unlock(&inode->i_lock); in put_pipe_info()
735 struct pipe_inode_info *pipe = file->private_data; in pipe_release()
738 if (file->f_mode & FMODE_READ) in pipe_release()
739 pipe->readers--; in pipe_release()
740 if (file->f_mode & FMODE_WRITE) in pipe_release()
741 pipe->writers--; in pipe_release()
744 if (!pipe->readers != !pipe->writers) { in pipe_release()
745 wake_up_interruptible_all(&pipe->rd_wait); in pipe_release()
746 wake_up_interruptible_all(&pipe->wr_wait); in pipe_release()
747 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); in pipe_release()
748 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); in pipe_release()
759 struct pipe_inode_info *pipe = filp->private_data; in pipe_fasync()
763 if (filp->f_mode & FMODE_READ) in pipe_fasync()
764 retval = fasync_helper(fd, filp, on, &pipe->fasync_readers); in pipe_fasync()
765 if ((filp->f_mode & FMODE_WRITE) && retval >= 0) { in pipe_fasync()
766 retval = fasync_helper(fd, filp, on, &pipe->fasync_writers); in pipe_fasync()
767 if (retval < 0 && (filp->f_mode & FMODE_READ)) in pipe_fasync()
768 /* this can happen only if on == T */ in pipe_fasync()
769 fasync_helper(-1, filp, 0, &pipe->fasync_readers); in pipe_fasync()
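pipe_fasync() backs the kill_fasync() calls seen in pipe_read()/pipe_write(): a process that registers with F_SETOWN and O_ASYNC receives SIGIO when data arrives or space frees up. A sketch of arming that on the read end (hypothetical demo, error handling trimmed):

/* Hypothetical demo: request SIGIO on the read end of a pipe. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigio;

static void on_sigio(int sig)
{
    (void)sig;
    got_sigio = 1;
}

int main(void)
{
    int fds[2];

    if (pipe(fds) < 0)
        return 1;

    signal(SIGIO, on_sigio);
    fcntl(fds[0], F_SETOWN, getpid());                        /* SIGIO target  */
    fcntl(fds[0], F_SETFL, fcntl(fds[0], F_GETFL) | O_ASYNC); /* enable fasync */

    write(fds[1], "x", 1);   /* pipe_write() -> kill_fasync(..., POLL_IN) */
    while (!got_sigio)       /* wait for the asynchronous SIGIO delivery  */
        usleep(1000);
    puts("SIGIO received");
    return 0;
}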
778 return atomic_long_add_return(new - old, &user->pipe_bufs); in account_pipe_buffers()
825 pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer), in alloc_pipe_info()
828 if (pipe->bufs) { in alloc_pipe_info()
829 init_waitqueue_head(&pipe->rd_wait); in alloc_pipe_info()
830 init_waitqueue_head(&pipe->wr_wait); in alloc_pipe_info()
831 pipe->r_counter = pipe->w_counter = 1; in alloc_pipe_info()
832 pipe->max_usage = pipe_bufs; in alloc_pipe_info()
833 pipe->ring_size = pipe_bufs; in alloc_pipe_info()
834 pipe->nr_accounted = pipe_bufs; in alloc_pipe_info()
835 pipe->user = user; in alloc_pipe_info()
836 mutex_init(&pipe->mutex); in alloc_pipe_info()
853 if (pipe->watch_queue) in free_pipe_info()
854 watch_queue_clear(pipe->watch_queue); in free_pipe_info()
857 (void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0); in free_pipe_info()
858 free_uid(pipe->user); in free_pipe_info()
859 for (i = 0; i < pipe->ring_size; i++) { in free_pipe_info()
860 struct pipe_buffer *buf = pipe->bufs + i; in free_pipe_info()
861 if (buf->ops) in free_pipe_info()
865 if (pipe->watch_queue) in free_pipe_info()
866 put_watch_queue(pipe->watch_queue); in free_pipe_info()
868 if (pipe->tmp_page) in free_pipe_info()
869 __free_page(pipe->tmp_page); in free_pipe_info()
870 kfree(pipe->bufs); in free_pipe_info()
882 d_inode(dentry)->i_ino); in pipefs_dname()
891 struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb); in get_pipe_inode()
897 inode->i_ino = get_next_ino(); in get_pipe_inode()
903 inode->i_pipe = pipe; in get_pipe_inode()
904 pipe->files = 2; in get_pipe_inode()
905 pipe->readers = pipe->writers = 1; in get_pipe_inode()
906 inode->i_fop = &pipefifo_fops; in get_pipe_inode()
914 inode->i_state = I_DIRTY; in get_pipe_inode()
915 inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR; in get_pipe_inode()
916 inode->i_uid = current_fsuid(); in get_pipe_inode()
917 inode->i_gid = current_fsgid(); in get_pipe_inode()
936 return -ENFILE; in create_pipe_files()
939 error = watch_queue_init(inode->i_pipe); in create_pipe_files()
941 free_pipe_info(inode->i_pipe); in create_pipe_files()
951 free_pipe_info(inode->i_pipe); in create_pipe_files()
956 f->private_data = inode->i_pipe; in create_pipe_files()
961 put_pipe_info(inode, inode->i_pipe); in create_pipe_files()
965 res[0]->private_data = inode->i_pipe; in create_pipe_files()
978 return -EINVAL; in __do_pipe_flags()
998 files[0]->f_mode |= FMODE_NOWAIT; in __do_pipe_flags()
999 files[1]->f_mode |= FMODE_NOWAIT; in __do_pipe_flags()
1038 error = -EFAULT; in do_pipe2()
1069 wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe)); in pipe_wait_readable()
1076 wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe)); in pipe_wait_writable()
1098 prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE); in wait_for_partner()
1101 finish_wait(&pipe->rd_wait, &rdwait); in wait_for_partner()
1106 return cur == *cnt ? -ERESTARTSYS : 0; in wait_for_partner()
1111 wake_up_interruptible_all(&pipe->rd_wait); in wake_up_partner()
1117 bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC; in fifo_open()
1120 filp->f_version = 0; in fifo_open()
1122 spin_lock(&inode->i_lock); in fifo_open()
1123 if (inode->i_pipe) { in fifo_open()
1124 pipe = inode->i_pipe; in fifo_open()
1125 pipe->files++; in fifo_open()
1126 spin_unlock(&inode->i_lock); in fifo_open()
1128 spin_unlock(&inode->i_lock); in fifo_open()
1131 return -ENOMEM; in fifo_open()
1132 pipe->files = 1; in fifo_open()
1133 spin_lock(&inode->i_lock); in fifo_open()
1134 if (unlikely(inode->i_pipe)) { in fifo_open()
1135 inode->i_pipe->files++; in fifo_open()
1136 spin_unlock(&inode->i_lock); in fifo_open()
1138 pipe = inode->i_pipe; in fifo_open()
1140 inode->i_pipe = pipe; in fifo_open()
1141 spin_unlock(&inode->i_lock); in fifo_open()
1144 filp->private_data = pipe; in fifo_open()
1152 switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) { in fifo_open()
1159 pipe->r_counter++; in fifo_open()
1160 if (pipe->readers++ == 0) in fifo_open()
1163 if (!is_pipe && !pipe->writers) { in fifo_open()
1164 if ((filp->f_flags & O_NONBLOCK)) { in fifo_open()
1167 filp->f_version = pipe->w_counter; in fifo_open()
1169 if (wait_for_partner(pipe, &pipe->w_counter)) in fifo_open()
1178 * POSIX.1 says that O_NONBLOCK means return -1 with in fifo_open()
1181 ret = -ENXIO; in fifo_open()
1182 if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers) in fifo_open()
1185 pipe->w_counter++; in fifo_open()
1186 if (!pipe->writers++) in fifo_open()
1189 if (!is_pipe && !pipe->readers) { in fifo_open()
1190 if (wait_for_partner(pipe, &pipe->r_counter)) in fifo_open()
1203 pipe->readers++; in fifo_open()
1204 pipe->writers++; in fifo_open()
1205 pipe->r_counter++; in fifo_open()
1206 pipe->w_counter++; in fifo_open()
1207 if (pipe->readers == 1 || pipe->writers == 1) in fifo_open()
1212 ret = -EINVAL; in fifo_open()
1221 if (!--pipe->readers) in fifo_open()
1222 wake_up_interruptible(&pipe->wr_wait); in fifo_open()
1223 ret = -ERESTARTSYS; in fifo_open()
1227 if (!--pipe->writers) in fifo_open()
1228 wake_up_interruptible_all(&pipe->rd_wait); in fifo_open()
1229 ret = -ERESTARTSYS; in fifo_open()
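fifo_open() is where the classic FIFO open semantics live: a blocking open waits in wait_for_partner() for the other end, a non-blocking read open succeeds immediately, and a non-blocking write open with no readers fails with ENXIO (lines 1178-1182). A userspace sketch (the /tmp/demo.fifo path is made up, error handling trimmed):

/* Hypothetical demo of FIFO open semantics. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    const char *path = "/tmp/demo.fifo";

    mkfifo(path, 0600);

    /* Non-blocking read open: succeeds even with no writer yet. */
    int rfd = open(path, O_RDONLY | O_NONBLOCK);

    /* A non-blocking write open with no reader would fail with ENXIO;
     * here a reader already exists, so it succeeds. */
    int wfd = open(path, O_WRONLY | O_NONBLOCK);

    printf("rfd=%d wfd=%d (wfd would be -1/ENXIO without the reader)\n",
           rfd, wfd);
    close(rfd);
    close(wfd);
    unlink(path);
    return 0;
}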
1252 * Currently we rely on the pipe array holding a power-of-2 number
1271 * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
1277 unsigned int head, tail, mask, n; in pipe_resize_ring() local
1282 return -ENOMEM; in pipe_resize_ring()
1284 spin_lock_irq(&pipe->rd_wait.lock); in pipe_resize_ring()
1285 mask = pipe->ring_size - 1; in pipe_resize_ring()
1286 head = pipe->head; in pipe_resize_ring()
1287 tail = pipe->tail; in pipe_resize_ring()
1289 n = pipe_occupancy(head, tail); in pipe_resize_ring()
1291 spin_unlock_irq(&pipe->rd_wait.lock); in pipe_resize_ring()
1293 return -EBUSY; in pipe_resize_ring()
1301 unsigned int h = head & mask; in pipe_resize_ring()
1302 unsigned int t = tail & mask; in pipe_resize_ring() local
1303 if (h > t) { in pipe_resize_ring()
1304 memcpy(bufs, pipe->bufs + t, in pipe_resize_ring()
1307 unsigned int tsize = pipe->ring_size - t; in pipe_resize_ring()
1309 memcpy(bufs + tsize, pipe->bufs, in pipe_resize_ring()
1311 memcpy(bufs, pipe->bufs + t, in pipe_resize_ring()
1316 head = n; in pipe_resize_ring()
1319 kfree(pipe->bufs); in pipe_resize_ring()
1320 pipe->bufs = bufs; in pipe_resize_ring()
1321 pipe->ring_size = nr_slots; in pipe_resize_ring()
1322 if (pipe->max_usage > nr_slots) in pipe_resize_ring()
1323 pipe->max_usage = nr_slots; in pipe_resize_ring()
1324 pipe->tail = tail; in pipe_resize_ring()
1325 pipe->head = head; in pipe_resize_ring()
1328 pipe->max_usage = nr_slots; in pipe_resize_ring()
1329 pipe->nr_accounted = nr_slots; in pipe_resize_ring()
1332 spin_unlock_irq(&pipe->rd_wait.lock); in pipe_resize_ring()
1335 wake_up_interruptible(&pipe->wr_wait); in pipe_resize_ring()
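pipe_resize_ring() refuses to shrink below the current occupancy (-EBUSY at line 1293) and otherwise copies the live slots to the front of the new array, leaving tail at 0 and head at the occupancy n (the memcpy() calls and head = n above). A standalone sketch of that compaction for a generic power-of-two ring; the struct and function names here are invented, and the kernel version additionally handles locking, accounting and watch-queue pipes:

/* Illustrative resize/compaction of a power-of-two ring; not kernel code. */
#include <stdlib.h>
#include <string.h>

struct slot { int v; };

struct ring {
    struct slot *bufs;
    unsigned int ring_size;     /* power of two            */
    unsigned int head, tail;    /* free-running, unmasked   */
};

static int ring_resize(struct ring *r, unsigned int nr_slots)
{
    unsigned int mask = r->ring_size - 1;
    unsigned int n = r->head - r->tail;        /* occupancy */
    struct slot *bufs;

    if (nr_slots < n)
        return -1;       /* would lose data: the kernel returns -EBUSY */

    bufs = calloc(nr_slots, sizeof(*bufs));
    if (!bufs)
        return -1;       /* the kernel returns -ENOMEM */

    /* Copy the occupied span to the front of the new array, in order;
     * it may wrap around the end of the old array. */
    if (n) {
        unsigned int h = r->head & mask, t = r->tail & mask;

        if (h > t) {
            memcpy(bufs, r->bufs + t, n * sizeof(*bufs));
        } else {
            unsigned int tsize = r->ring_size - t;

            memcpy(bufs, r->bufs + t, tsize * sizeof(*bufs));
            memcpy(bufs + tsize, r->bufs, h * sizeof(*bufs));
        }
    }

    free(r->bufs);
    r->bufs = bufs;
    r->ring_size = nr_slots;
    r->tail = 0;
    r->head = n;
    return 0;
}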
1341 * pipe size if successful, or return -ERROR on error.
1350 return -EBUSY; in pipe_set_size()
1356 return -EINVAL; in pipe_set_size()
1365 if (nr_slots > pipe->max_usage && in pipe_set_size()
1367 return -EPERM; in pipe_set_size()
1369 user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots); in pipe_set_size()
1371 if (nr_slots > pipe->max_usage && in pipe_set_size()
1375 ret = -EPERM; in pipe_set_size()
1383 return pipe->max_usage * PAGE_SIZE; in pipe_set_size()
1386 (void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted); in pipe_set_size()
1391 * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
1396 struct pipe_inode_info *pipe = file->private_data; in get_pipe_info()
1398 if (file->f_op != &pipefifo_fops || !pipe) in get_pipe_info()
1412 return -EBADF; in pipe_fcntl()
1421 ret = pipe->max_usage * PAGE_SIZE; in pipe_fcntl()
1424 ret = -EINVAL; in pipe_fcntl()
1438 * pipefs should _never_ be mounted by userland - too much of security hassle,
1439 * no real gain from having the whole whorehouse mounted. So we don't need
1440 * any operations on the root directory. However, we need a non-trivial
1441 * d_name - pipe: will go nicely and kill the special-casing in procfs.
1448 return -ENOMEM; in pipefs_init_fs_context()
1449 ctx->ops = &pipefs_ops; in pipefs_init_fs_context()
1450 ctx->dops = &pipefs_dentry_operations; in pipefs_init_fs_context()
1470 return -EINVAL; in do_proc_dopipe_max_size_conv()
1490 .procname = "pipe-max-size",
1497 .procname = "pipe-user-pages-hard",
1504 .procname = "pipe-user-pages-soft",