Lines Matching +full:add +full:- +full:fs
2 * linux/fs/ioctl.c
11 #include <linux/fs.h>
25 * vfs_ioctl - call filesystem specific ioctl methods
28 * @arg: command-specific argument for ioctl
30 * Invokes filesystem specific ->unlocked_ioctl, if one exists; otherwise
31 * returns -ENOTTY.
33 * Returns 0 on success, -errno on error.
38 int error = -ENOTTY; in vfs_ioctl()
40 if (!filp->f_op || !filp->f_op->unlocked_ioctl) in vfs_ioctl()
43 error = filp->f_op->unlocked_ioctl(filp, cmd, arg); in vfs_ioctl()
44 if (error == -ENOIOCTLCMD) in vfs_ioctl()
45 error = -ENOTTY; in vfs_ioctl()
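Pieced together, the matched lines above amount to the following dispatch helper. This is a sketch reconstructed from the hits, not a verbatim copy of fs/ioctl.c:

static long vfs_ioctl(struct file *filp, unsigned int cmd,
                      unsigned long arg)
{
        int error = -ENOTTY;

        /* No ->unlocked_ioctl method means the ioctl is unsupported. */
        if (!filp->f_op || !filp->f_op->unlocked_ioctl)
                goto out;

        error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
        /* Drivers may answer -ENOIOCTLCMD; userspace only sees -ENOTTY. */
        if (error == -ENOIOCTLCMD)
                error = -ENOTTY;
 out:
        return error;
}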
52 struct address_space *mapping = filp->f_mapping; in ioctl_fibmap()
56 if (!mapping->a_ops->bmap) in ioctl_fibmap()
57 return -EINVAL; in ioctl_fibmap()
59 return -EPERM; in ioctl_fibmap()
63 res = mapping->a_ops->bmap(mapping, block); in ioctl_fibmap()
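For reference, FIBMAP is driven from userspace like this: the int argument carries the logical block number in and the physical block number out, and the -EPERM path above corresponds to the CAP_SYS_RAWIO requirement.

#include <sys/ioctl.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <stdio.h>

int main(int argc, char **argv)
{
        int fd, block = 0;      /* logical block in, physical block out */

        if (argc != 2) {
                fprintf(stderr, "usage: %s <file>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0 || ioctl(fd, FIBMAP, &block) < 0) {
                perror("FIBMAP");
                return 1;
        }
        printf("logical block 0 -> physical block %d\n", block);
        return 0;
}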
68 * fiemap_fill_next_extent - Fiemap helper function
69 * @fieinfo: Fiemap context passed into ->fiemap
75 * Called from file system ->fiemap callback. Will populate extent
79 * Returns 0 on success, -errno on error, 1 if this was the last
89 struct fiemap_extent __user *dest = fieinfo->fi_extents_start; in fiemap_fill_next_extent()
92 if (fieinfo->fi_extents_max == 0) { in fiemap_fill_next_extent()
93 fieinfo->fi_extents_mapped++; in fiemap_fill_next_extent()
97 if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max) in fiemap_fill_next_extent()
113 dest += fieinfo->fi_extents_mapped; in fiemap_fill_next_extent()
115 return -EFAULT; in fiemap_fill_next_extent()
117 fieinfo->fi_extents_mapped++; in fiemap_fill_next_extent()
118 if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max) in fiemap_fill_next_extent()
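A filesystem's ->fiemap callback calls this helper once per extent. The callback below is hypothetical (example_fiemap and its single fixed extent are made up) and only illustrates the calling convention:

#include <linux/fs.h>
#include <linux/fiemap.h>

static int example_fiemap(struct inode *inode,
                          struct fiemap_extent_info *fieinfo,
                          u64 start, u64 len)
{
        /* Pretend the file is one block-sized extent at offset 0. */
        u64 logical = 0, phys = 0, elen = inode->i_sb->s_blocksize;
        int ret;

        ret = fiemap_fill_next_extent(fieinfo, logical, phys, elen,
                                      FIEMAP_EXTENT_LAST);
        if (ret < 0)
                return ret;     /* e.g. -EFAULT from the copy to userspace */
        return 0;               /* ret == 1: no more room in the user array */
}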
125 * fiemap_check_flags - check validity of requested flags for fiemap
126 * @fieinfo: Fiemap context passed into ->fiemap
129 * Called from file system ->fiemap callback. This will compute the
130 * intersection of valid fiemap flags and those that the fs supports. That
133 * -EBADR is returned, which tells ioctl_fiemap() to return those values to
134 * userspace. For this reason, a return code of -EBADR should be preserved.
136 * Returns 0 on success, -EBADR on bad flags.
142 incompat_flags = fieinfo->fi_flags & ~(FIEMAP_FLAGS_COMPAT & fs_flags); in fiemap_check_flags()
144 fieinfo->fi_flags = incompat_flags; in fiemap_check_flags()
145 return -EBADR; in fiemap_check_flags()
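Typical use is at the top of a ->fiemap callback: pass the set of flags the filesystem supports and return the result unchanged, so that -EBADR (with fi_flags rewritten to the unsupported bits) reaches ioctl_fiemap() and then userspace. A hypothetical sketch:

static int example_fiemap_entry(struct inode *inode,
                                struct fiemap_extent_info *fieinfo,
                                u64 start, u64 len)
{
        int ret;

        /* This fs only understands FIEMAP_FLAG_SYNC. */
        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;     /* preserve -EBADR, as the comment demands */

        /* ... walk extents, calling fiemap_fill_next_extent() ... */
        return 0;
}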
154 u64 maxbytes = (u64) sb->s_maxbytes; in fiemap_check_ranges()
159 return -EINVAL; in fiemap_check_ranges()
162 return -EFBIG; in fiemap_check_ranges()
165 * Shrink request scope to what the fs can actually handle. in fiemap_check_ranges()
167 if (len > maxbytes || (maxbytes - len) < start) in fiemap_check_ranges()
168 *new_len = maxbytes - start; in fiemap_check_ranges()
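With the gaps between the hits filled in, the range check looks roughly like this; the zero-length and past-s_maxbytes checks are inferred from the error codes above and may differ in detail from the source:

static int fiemap_check_ranges(struct super_block *sb,
                               u64 start, u64 len, u64 *new_len)
{
        u64 maxbytes = (u64) sb->s_maxbytes;

        *new_len = len;

        if (len == 0)
                return -EINVAL;

        if (start > maxbytes)
                return -EFBIG;

        /* Shrink request scope to what the fs can actually handle. */
        if (len > maxbytes || (maxbytes - len) < start)
                *new_len = maxbytes - start;

        return 0;
}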
178 struct inode *inode = filp->f_path.dentry->d_inode; in ioctl_fiemap()
179 struct super_block *sb = inode->i_sb; in ioctl_fiemap()
183 if (!inode->i_op->fiemap) in ioctl_fiemap()
184 return -EOPNOTSUPP; in ioctl_fiemap()
187 return -EFAULT; in ioctl_fiemap()
190 return -EINVAL; in ioctl_fiemap()
199 fieinfo.fi_extents_start = ufiemap->fm_extents; in ioctl_fiemap()
204 return -EFAULT; in ioctl_fiemap()
207 filemap_write_and_wait(inode->i_mapping); in ioctl_fiemap()
209 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len); in ioctl_fiemap()
213 error = -EFAULT; in ioctl_fiemap()
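From userspace, FS_IOC_FIEMAP is issued with a struct fiemap followed by room for fm_extent_count extents; FIEMAP_FLAG_SYNC triggers the filemap_write_and_wait() seen above. A minimal extent dumper:

#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
        unsigned int i, count = 32;
        struct fiemap *fm;
        int fd;

        if (argc != 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        fm = calloc(1, sizeof(*fm) + count * sizeof(struct fiemap_extent));
        if (fd < 0 || !fm)
                return 1;

        fm->fm_start = 0;
        fm->fm_length = FIEMAP_MAX_OFFSET;      /* map to end of file */
        fm->fm_flags = FIEMAP_FLAG_SYNC;        /* flush dirty pages first */
        fm->fm_extent_count = count;

        if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
                perror("FS_IOC_FIEMAP");
                return 1;
        }
        for (i = 0; i < fm->fm_mapped_extents; i++)
                printf("extent %u: logical %llu physical %llu length %llu\n", i,
                       (unsigned long long)fm->fm_extents[i].fe_logical,
                       (unsigned long long)fm->fm_extents[i].fe_physical,
                       (unsigned long long)fm->fm_extents[i].fe_length);
        return 0;
}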
222 return (offset >> inode->i_blkbits); in logical_to_blk()
227 return (blk << inode->i_blkbits); in blk_to_logical()
231 * __generic_block_fiemap - FIEMAP for block based inodes (no locking)
236 * @get_block: the fs's get_block function
242 * If it is possible to have data blocks beyond a hole past @inode->i_size, then
284 last_blk = logical_to_blk(inode, start + len - 1); in __generic_block_fiemap()
334 * map, and it wasn't the entire file, so add the extent in __generic_block_fiemap()
344 * are good to go, just add the extent to the fieinfo in __generic_block_fiemap()
356 * to add, so add it. in __generic_block_fiemap()
393 * generic_block_fiemap - FIEMAP for block based inodes
398 * @get_block: The block mapping function for the fs
409 mutex_lock(&inode->i_mutex); in generic_block_fiemap()
411 mutex_unlock(&inode->i_mutex); in generic_block_fiemap()
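A block-based filesystem normally just forwards its get_block routine; the names myfs_fiemap and myfs_get_block below are placeholders for whatever the filesystem calls them:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* The filesystem's own block-mapping routine (a get_block_t). */
static int myfs_get_block(struct inode *inode, sector_t iblock,
                          struct buffer_head *bh_result, int create);

static int myfs_fiemap(struct inode *inode,
                       struct fiemap_extent_info *fieinfo,
                       u64 start, u64 len)
{
        /* generic_block_fiemap() wraps the unlocked variant with
         * inode->i_mutex, as the matched lines above show. */
        return generic_block_fiemap(inode, fieinfo, start, len,
                                    myfs_get_block);
}

The callback is then hooked up through the filesystem's inode_operations as .fiemap = myfs_fiemap, which is the inode->i_op->fiemap pointer that ioctl_fiemap() checks.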
419 * This provides compatibility with legacy XFS pre-allocation ioctls
427 struct inode *inode = filp->f_path.dentry->d_inode; in ioctl_preallocate()
431 return -EFAULT; in ioctl_preallocate()
437 sr.l_start += filp->f_pos; in ioctl_preallocate()
443 return -EINVAL; in ioctl_preallocate()
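These legacy reservation ioctls adjust an l_start/l_len pair and feed it to the common preallocation code; from userspace the usual way to get the same effect today is fallocate(2), shown here with FALLOC_FL_KEEP_SIZE to mirror the reserve-without-growing semantics:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>

int main(int argc, char **argv)
{
        int fd;

        if (argc != 2)
                return 1;
        fd = open(argv[1], O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Reserve 16 MiB of blocks without changing i_size. */
        if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) < 0) {
                perror("fallocate");
                return 1;
        }
        return 0;
}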
452 struct inode *inode = filp->f_path.dentry->d_inode; in file_ioctl()
459 return put_user(i_size_read(inode) - filp->f_pos, p); in file_ioctl()
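That put_user() is the FIONREAD case for regular files: it reports i_size minus the current file position, i.e. how many bytes are left before EOF. For example:

#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int main(int argc, char **argv)
{
        int fd, remaining;

        if (argc != 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
                return 1;
        lseek(fd, 100, SEEK_SET);               /* advance f_pos */
        if (ioctl(fd, FIONREAD, &remaining) < 0) {
                perror("FIONREAD");
                return 1;
        }
        printf("%d bytes left before EOF\n", remaining);
        return 0;
}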
482 spin_lock(&filp->f_lock); in ioctl_fionbio()
484 filp->f_flags |= flag; in ioctl_fionbio()
486 filp->f_flags &= ~flag; in ioctl_fionbio()
487 spin_unlock(&filp->f_lock); in ioctl_fionbio()
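FIONBIO just sets or clears O_NONBLOCK under f_lock; it is equivalent to the fcntl(F_GETFL)/F_SETFL round trip:

#include <sys/ioctl.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        int on = 1;     /* non-zero: set O_NONBLOCK, zero: clear it */

        if (ioctl(STDIN_FILENO, FIONBIO, &on) < 0) {
                perror("FIONBIO");
                return 1;
        }
        return 0;
}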
503 if ((flag ^ filp->f_flags) & FASYNC) { in ioctl_fioasync()
504 if (filp->f_op && filp->f_op->fasync) in ioctl_fioasync()
505 /* fasync() adjusts filp->f_flags */ in ioctl_fioasync()
506 error = filp->f_op->fasync(fd, filp, on); in ioctl_fioasync()
508 error = -ENOTTY; in ioctl_fioasync()
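FIOASYNC toggles FASYNC and lets the driver's ->fasync() register the file for SIGIO delivery; the owner still has to be set separately with fcntl(F_SETOWN):

#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        int on = 1;

        /* Direct SIGIO to this process, then enable async notification. */
        if (fcntl(STDIN_FILENO, F_SETOWN, getpid()) < 0 ||
            ioctl(STDIN_FILENO, FIOASYNC, &on) < 0) {
                perror("FIOASYNC setup");
                return 1;
        }
        return 0;
}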
515 struct super_block *sb = filp->f_path.dentry->d_inode->i_sb; in ioctl_fsfreeze()
518 return -EPERM; in ioctl_fsfreeze()
521 if (sb->s_op->freeze_fs == NULL) in ioctl_fsfreeze()
522 return -EOPNOTSUPP; in ioctl_fsfreeze()
530 struct super_block *sb = filp->f_path.dentry->d_inode->i_sb; in ioctl_fsthaw()
533 return -EPERM; in ioctl_fsthaw()
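Both freeze ioctls take an fd somewhere on the filesystem (typically the mount point) and require CAP_SYS_ADMIN, which is what the -EPERM checks above enforce; fsfreeze(8) is essentially this:

#include <sys/ioctl.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <stdio.h>

int main(int argc, char **argv)
{
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, FIFREEZE, 0) < 0) {       /* block new writes */
                perror("FIFREEZE");
                return 1;
        }
        /* ... take the snapshot or backup here ... */
        if (ioctl(fd, FITHAW, 0) < 0) {         /* let writes resume */
                perror("FITHAW");
                return 1;
        }
        return 0;
}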
540 * When you add any new common ioctls to the switches above and below
551 struct inode *inode = filp->f_path.dentry->d_inode; in do_vfs_ioctl()
571 if (S_ISDIR(inode->i_mode) || S_ISREG(inode->i_mode) || in do_vfs_ioctl()
572 S_ISLNK(inode->i_mode)) { in do_vfs_ioctl()
575 -EFAULT : 0; in do_vfs_ioctl()
577 error = -ENOTTY; in do_vfs_ioctl()
592 return put_user(inode->i_sb->s_blocksize, argp); in do_vfs_ioctl()
595 if (S_ISREG(inode->i_mode)) in do_vfs_ioctl()
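The put_user() of s_blocksize above is the FIGETBSZ case, one of the generic ioctls do_vfs_ioctl() answers before anything is handed to the filesystem:

#include <sys/ioctl.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <stdio.h>

int main(int argc, char **argv)
{
        int fd, blocksize;

        if (argc != 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0 || ioctl(fd, FIGETBSZ, &blocksize) < 0) {
                perror("FIGETBSZ");
                return 1;
        }
        printf("filesystem block size: %d\n", blocksize);
        return 0;
}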
607 int error = -EBADF; in SYSCALL_DEFINE3()
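The syscall entry is thin: look up the struct file, run the LSM hook, then delegate to do_vfs_ioctl(). The sketch below is reconstructed from memory of this era's fs/ioctl.c and may differ in detail:

SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
        struct file *filp;
        int error = -EBADF;
        int fput_needed;

        filp = fget_light(fd, &fput_needed);
        if (!filp)
                goto out;

        error = security_file_ioctl(filp, cmd, arg);
        if (error)
                goto out_fput;

        error = do_vfs_ioctl(filp, fd, cmd, arg);
 out_fput:
        fput_light(filp, fput_needed);
 out:
        return error;
}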