Lines matching "no-unaligned-direct-access" in fs/xfs/xfs_file.c
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
30 #include <linux/backing-dev.h>
47 struct xfs_mount *mp = ip->i_mount; in xfs_is_falloc_aligned()
51 if (!is_power_of_2(mp->m_sb.sb_rextsize)) { in xfs_is_falloc_aligned()
55 rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize); in xfs_is_falloc_aligned()
62 mask = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize) - 1; in xfs_is_falloc_aligned()
64 mask = mp->m_sb.sb_blocksize - 1; in xfs_is_falloc_aligned()
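The two branches above implement a standard alignment test: when the realtime extent size is not a power of two, xfs_is_falloc_aligned() falls back to division, otherwise it builds a mask from the byte size and tests offset and length in one operation. A minimal standalone sketch of the same test (names are illustrative, not XFS API):

#include <stdbool.h>
#include <stdint.h>

static bool falloc_aligned(uint64_t pos, uint64_t len, uint64_t granule)
{
	if (granule & (granule - 1))		/* not a power of two */
		return !(pos % granule) && !(len % granule);
	return !((pos | len) & (granule - 1));	/* single mask test */
}
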
72 * as there is no file data to flush, and thus also no need for explicit
73 * cache flush operations, and there are no non-transaction metadata updates
83 struct xfs_inode *ip = XFS_I(file->f_mapping->host); in xfs_dir_fsync()
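Per the comment above, a directory fsync has no data to flush and no cache flushing to do, so it reduces to a log force. A condensed sketch of the whole function, assuming it matches the mainline structure:

STATIC int
xfs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);

	trace_xfs_dir_fsync(ip);
	return xfs_log_force_inode(ip);	/* push the log, nothing else */
}
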
96 if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP)) in xfs_fsync_seq()
98 return ip->i_itemp->ili_commit_seq; in xfs_fsync_seq()
126 error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, in xfs_fsync_flush_log()
129 spin_lock(&ip->i_itemp->ili_lock); in xfs_fsync_flush_log()
130 ip->i_itemp->ili_fsync_fields = 0; in xfs_fsync_flush_log()
131 spin_unlock(&ip->i_itemp->ili_lock); in xfs_fsync_flush_log()
144 struct xfs_inode *ip = XFS_I(file->f_mapping->host); in xfs_file_fsync()
145 struct xfs_mount *mp = ip->i_mount; in xfs_file_fsync()
156 return -EIO; in xfs_file_fsync()
167 error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev); in xfs_file_fsync()
168 else if (mp->m_logdev_targp != mp->m_ddev_targp) in xfs_file_fsync()
169 error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev); in xfs_file_fsync()
185 * a no-op we might have to flush the data device cache here. in xfs_file_fsync()
191 mp->m_logdev_targp == mp->m_ddev_targp) { in xfs_file_fsync()
192 err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev); in xfs_file_fsync()
205 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_ilock_iocb()
207 if (iocb->ki_flags & IOCB_NOWAIT) { in xfs_ilock_iocb()
209 return -EAGAIN; in xfs_ilock_iocb()
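The -EAGAIN return above is the heart of the IOCB_NOWAIT protocol: never sleep on the inode lock for a nonblocking iocb. Reassembled as a sketch (assuming the mainline shape of xfs_ilock_iocb()):

static int
xfs_ilock_iocb(struct kiocb *iocb, unsigned int lock_mode)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, lock_mode))
			return -EAGAIN;		/* caller may retry blocking */
	} else {
		xfs_ilock(ip, lock_mode);
	}
	return 0;
}
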
223 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_ilock_iocb_for_write()
243 /* get a shared lock if no remapping in progress */ in xfs_ilock_for_write_fault()
259 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_file_dio_read()
267 file_accessed(iocb->ki_filp); in xfs_file_dio_read()
283 struct xfs_inode *ip = XFS_I(iocb->ki_filp->f_mapping->host); in xfs_file_dax_read()
297 file_accessed(iocb->ki_filp); in xfs_file_dax_read()
306 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_file_buffered_read()
325 struct inode *inode = file_inode(iocb->ki_filp); in xfs_file_read_iter()
326 struct xfs_mount *mp = XFS_I(inode)->i_mount; in xfs_file_read_iter()
332 return -EIO; in xfs_file_read_iter()
336 else if (iocb->ki_flags & IOCB_DIRECT) in xfs_file_read_iter()
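The fragments above are the read-side dispatch. The implied structure, assuming it matches mainline, is a three-way split on DAX, direct, and buffered I/O:

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_read(iocb, to);
	else
		ret = xfs_file_buffered_read(iocb, to);
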
356 struct xfs_mount *mp = ip->i_mount; in xfs_file_splice_read()
362 return -EIO; in xfs_file_splice_read()
375 * Common pre-write limit and setup checks.
379 * if called for a direct write beyond i_size.
387 struct file *file = iocb->ki_filp; in xfs_file_write_checks()
388 struct inode *inode = file->f_mapping->host; in xfs_file_write_checks()
400 if (iocb->ki_flags & IOCB_NOWAIT) { in xfs_file_write_checks()
402 if (error == -EWOULDBLOCK) in xfs_file_write_checks()
403 error = -EAGAIN; in xfs_file_write_checks()
446 if (iocb->ki_pos <= i_size_read(inode)) in xfs_file_write_checks()
449 spin_lock(&ip->i_flags_lock); in xfs_file_write_checks()
451 if (iocb->ki_pos > isize) { in xfs_file_write_checks()
452 spin_unlock(&ip->i_flags_lock); in xfs_file_write_checks()
454 if (iocb->ki_flags & IOCB_NOWAIT) in xfs_file_write_checks()
455 return -EAGAIN; in xfs_file_write_checks()
467 * we now need to wait for all of them to drain. Non-AIO in xfs_file_write_checks()
470 * no-op. in xfs_file_write_checks()
477 trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize); in xfs_file_write_checks()
478 error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL); in xfs_file_write_checks()
482 spin_unlock(&ip->i_flags_lock); in xfs_file_write_checks()
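The xfs_zero_range() call above preserves the user-visible invariant that the gap between the old EOF and a new write offset reads back as zeroes. A small userspace demonstration (hypothetical file name "f", error handling omitted for brevity):

#include <assert.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char zeros[8] = { 0 }, buf[8];
	int fd = open("f", O_CREAT | O_RDWR | O_TRUNC, 0644);

	pwrite(fd, "A", 1, 0);			/* old EOF = 1 */
	pwrite(fd, "B", 1, 4096);		/* extend well past EOF */
	pread(fd, buf, 8, 1);			/* read from the gap */
	assert(!memcmp(buf, zeros, 8));		/* gap is zero-filled */
	close(fd);
	return 0;
}
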
495 struct inode *inode = file_inode(iocb->ki_filp); in xfs_dio_write_end_io()
497 loff_t offset = iocb->ki_pos; in xfs_dio_write_end_io()
502 if (xfs_is_shutdown(ip->i_mount)) in xfs_dio_write_end_io()
503 return -EIO; in xfs_dio_write_end_io()
514 XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size); in xfs_dio_write_end_io()
519 * task-wide nofs context for the following operations. in xfs_dio_write_end_io()
530 * Unwritten conversion updates the in-core isize after extent in xfs_dio_write_end_io()
531 * conversion but before updating the on-disk size. Updating isize any in xfs_dio_write_end_io()
541 * We need to update the in-core inode size here so that we don't end up in xfs_dio_write_end_io()
542 * with the on-disk inode size being outside the in-core inode size. We in xfs_dio_write_end_io()
543 * have no other method of updating EOF for AIO, so always do it here in xfs_dio_write_end_io()
561 spin_lock(&ip->i_flags_lock); in xfs_dio_write_end_io()
564 spin_unlock(&ip->i_flags_lock); in xfs_dio_write_end_io()
567 spin_unlock(&ip->i_flags_lock); in xfs_dio_write_end_io()
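The i_flags_lock fragments above guard a check-lock-recheck update of the in-core size, so a racing completion can never move EOF backwards. Simplified sketch of the pattern (assumed from the fragments, not the verbatim function):

	if (offset + size > i_size_read(inode)) {
		spin_lock(&ip->i_flags_lock);
		if (offset + size > i_size_read(inode)) {
			i_size_write(inode, offset + size);
			spin_unlock(&ip->i_flags_lock);
			error = xfs_setfilesize(ip, offset, size);
		} else {
			spin_unlock(&ip->i_flags_lock);	/* lost the race */
		}
	}
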
580 * Handle block aligned direct I/O writes
617 * Handle block unaligned direct I/O writes
619 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
620 * them to be done in parallel with reads and other direct I/O writes. However,
621 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
622 * to do sub-block zeroing and that requires serialisation against other direct
624 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
625 * In the case where sub-block zeroing is not required, we can do concurrent
626 * sub-block dios to the same block successfully.
629 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
646 * Extending writes need exclusivity because of the sub-block zeroing in xfs_file_dio_write_unaligned()
650 if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) { in xfs_file_dio_write_unaligned()
651 if (iocb->ki_flags & IOCB_NOWAIT) in xfs_file_dio_write_unaligned()
652 return -EAGAIN; in xfs_file_dio_write_unaligned()
663 * We can't properly handle unaligned direct I/O to reflink files yet, in xfs_file_dio_write_unaligned()
668 ret = -ENOTBLK; in xfs_file_dio_write_unaligned()
677 * If we are doing exclusive unaligned I/O, this must be the only I/O in xfs_file_dio_write_unaligned()
678 * in-flight. Otherwise we risk data corruption due to unwritten extent in xfs_file_dio_write_unaligned()
690 * Retry unaligned I/O with exclusive blocking semantics if the DIO in xfs_file_dio_write_unaligned()
694 if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) { in xfs_file_dio_write_unaligned()
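Putting the fragments above together: the fast path takes the shared lock and sets IOMAP_DIO_OVERWRITE_ONLY so the dio layer fails anything that would need sub-block zeroing, and the -EAGAIN test retries the whole write under the exclusive lock. A condensed sketch of that control flow (assumed from the comments, not verbatim):

	unsigned int	iolock = XFS_IOLOCK_SHARED;
	unsigned int	flags = IOMAP_DIO_OVERWRITE_ONLY;
	ssize_t		ret;

retry_exclusive:
	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	/* drain in-flight dio when exclusive, then issue the I/O
	 * with 'flags' passed down to the iomap dio layer */
	xfs_iunlock(ip, iolock);

	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT) &&
	    iolock == XFS_IOLOCK_SHARED) {
		iolock = XFS_IOLOCK_EXCL;	/* serialise zeroing */
		flags = 0;			/* permit zeroing now */
		goto retry_exclusive;
	}
	return ret;
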
711 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_file_dio_write()
715 /* direct I/O must be aligned to device logical sector size */ in xfs_file_dio_write()
716 if ((iocb->ki_pos | count) & target->bt_logical_sectormask) in xfs_file_dio_write()
717 return -EINVAL; in xfs_file_dio_write()
718 if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask) in xfs_file_dio_write()
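The two masks above enforce, first, the hard rule that direct I/O be aligned to the device logical sector size (else -EINVAL), and second, the routing of anything not filesystem-block aligned to the unaligned path. The userspace contract this corresponds to, as a sketch (4096 is an assumption that covers common logical sector sizes):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

#define ALIGN_TO 4096

int main(void)
{
	void *buf;
	int fd = open("f", O_CREAT | O_WRONLY | O_DIRECT, 0644);

	posix_memalign(&buf, ALIGN_TO, ALIGN_TO);	/* aligned buffer */
	pwrite(fd, buf, ALIGN_TO, 0);	/* aligned length and offset */
	free(buf);
	close(fd);
	return 0;
}
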
728 struct inode *inode = iocb->ki_filp->f_mapping->host; in xfs_file_dax_write()
741 pos = iocb->ki_pos; in xfs_file_dax_write()
745 if (ret > 0 && iocb->ki_pos > i_size_read(inode)) { in xfs_file_dax_write()
746 i_size_write(inode, iocb->ki_pos); in xfs_file_dax_write()
756 XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret); in xfs_file_dax_write()
758 /* Handle various SYNC-type writes */ in xfs_file_dax_write()
769 struct inode *inode = iocb->ki_filp->f_mapping->host; in xfs_file_buffered_write()
799 if (ret == -EDQUOT && !cleared_space) { in xfs_file_buffered_write()
804 } else if (ret == -ENOSPC && !cleared_space) { in xfs_file_buffered_write()
808 xfs_flush_inodes(ip->i_mount); in xfs_file_buffered_write()
812 xfs_blockgc_free_space(ip->i_mount, &icw); in xfs_file_buffered_write()
821 XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret); in xfs_file_buffered_write()
822 /* Handle various SYNC-type writes */ in xfs_file_buffered_write()
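The -EDQUOT/-ENOSPC handling above retries the write exactly once after attempting to free space. The shape of that loop, sketched with hypothetical helpers do_buffered_write() and reclaim_space() standing in for the iomap call and the flush/blockgc sequence:

	bool	cleared_space = false;
	ssize_t	ret;

write_retry:
	ret = do_buffered_write(iocb, from);		/* hypothetical */
	if ((ret == -EDQUOT || ret == -ENOSPC) && !cleared_space) {
		cleared_space = true;			/* one retry only */
		reclaim_space(ip);			/* hypothetical: flush
							 * delalloc, gc blocks */
		goto write_retry;
	}
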
833 struct inode *inode = iocb->ki_filp->f_mapping->host; in xfs_file_write_iter()
838 XFS_STATS_INC(ip->i_mount, xs_write_calls); in xfs_file_write_iter()
843 if (xfs_is_shutdown(ip->i_mount)) in xfs_file_write_iter()
844 return -EIO; in xfs_file_write_iter()
849 if (iocb->ki_flags & IOCB_DIRECT) { in xfs_file_write_iter()
857 if (ret != -ENOTBLK) in xfs_file_write_iter()
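The -ENOTBLK test above implements the direct-to-buffered fallback: a direct write that cannot proceed (for example the reflink case in the unaligned path) is retried through the page cache. Sketch of the dispatch, assumed to match mainline:

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = xfs_file_dio_write(iocb, from);
		if (ret != -ENOTBLK)
			return ret;	/* done, or a real error */
	}
	return xfs_file_buffered_write(iocb, from);
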
884 page = dax_layout_busy_page(inode->i_mapping); in xfs_break_dax_layouts()
889 return ___wait_var_event(&page->_refcount, in xfs_break_dax_layouts()
890 atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE, in xfs_break_dax_layouts()
918 error = -EINVAL; in xfs_break_layouts()
930 if (xfs_has_wsync(ip->i_mount)) in xfs_file_sync_writes()
932 if (filp->f_flags & (__O_SYNC | O_DSYNC)) in xfs_file_sync_writes()
959 if (!S_ISREG(inode->i_mode)) in xfs_file_fallocate()
960 return -EINVAL; in xfs_file_fallocate()
962 return -EOPNOTSUPP; in xfs_file_fallocate()
974 * require the in-memory size to be fully up-to-date. in xfs_file_fallocate()
1009 error = -EINVAL; in xfs_file_fallocate()
1014 * There is no need to overlap collapse range with EOF, in xfs_file_fallocate()
1018 error = -EINVAL; in xfs_file_fallocate()
1022 new_size = i_size_read(inode) - len; in xfs_file_fallocate()
1031 error = -EINVAL; in xfs_file_fallocate()
1036 * New inode size must not exceed ->s_maxbytes, accounting for in xfs_file_fallocate()
1039 if (inode->i_sb->s_maxbytes - isize < len) { in xfs_file_fallocate()
1040 error = -EFBIG; in xfs_file_fallocate()
1047 error = -EINVAL; in xfs_file_fallocate()
1068 * still zero-valued by virtue of the hole punch. in xfs_file_fallocate()
1078 len = round_up(offset + len, blksize) - in xfs_file_fallocate()
1091 error = -EOPNOTSUPP; in xfs_file_fallocate()
1118 * leave shifted extents past EOF and hence losing access to in xfs_file_fallocate()
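From userspace those checks surface as errno values: collapse and insert both require allocation-unit alignment (-EINVAL), collapse must not reach EOF (-EINVAL), and insert must not push EOF past s_maxbytes (-EFBIG). A sketch (hypothetical helper; blksz must be the filesystem allocation unit):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

int shrink_then_grow(int fd, off_t blksz)
{
	/* EINVAL if misaligned or if the range reaches EOF */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, blksz))
		return -1;
	/* EFBIG if the shift would exceed the maximum file size */
	return fallocate(fd, FALLOC_FL_INSERT_RANGE, 0, blksz);
}
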
1173 struct xfs_mount *mp = src->i_mount; in xfs_file_remap_range()
1179 return -EINVAL; in xfs_file_remap_range()
1182 return -EOPNOTSUPP; in xfs_file_remap_range()
1185 return -EIO; in xfs_file_remap_range()
1207 (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) && in xfs_file_remap_range()
1209 !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)) in xfs_file_remap_range()
1210 cowextsize = src->i_cowextsize; in xfs_file_remap_range()
1231 if (xfs_is_shutdown(XFS_M(inode->i_sb))) in xfs_file_open()
1232 return -EIO; in xfs_file_open()
1233 file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC | in xfs_file_open()
1252 * If there are any blocks, read-ahead block 0 as we're almost in xfs_dir_open()
1256 if (ip->i_df.if_nextents > 0) in xfs_dir_open()
1288 * point we can change the ->readdir prototype to include the in xfs_file_readdir()
1291 bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size); in xfs_file_readdir()
1302 struct inode *inode = file->f_mapping->host; in xfs_file_llseek()
1304 if (xfs_is_shutdown(XFS_I(inode)->i_mount)) in xfs_file_llseek()
1305 return -EIO; in xfs_file_llseek()
1320 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes); in xfs_file_llseek()
1332 (write_fault && !vmf->cow_page) ? in xfs_dax_fault()
1355 * invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
1357 * i_lock (XFS - extent map serialisation)
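The two lines above belong to a comment documenting the fault-path lock ordering, which (assuming the mainline text) runs, outermost to innermost: mmap_lock (MM) -> sb_start_pagefault (vfs freeze protection) -> invalidate_lock (vfs/XFS_MMAPLOCK, truncate serialisation) -> page lock (MM) -> i_lock (XFS extent map serialisation).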
1365 struct inode *inode = file_inode(vmf->vma->vm_file); in __xfs_filemap_fault()
1373 sb_start_pagefault(inode->i_sb); in __xfs_filemap_fault()
1374 file_update_time(vmf->vma->vm_file); in __xfs_filemap_fault()
1396 sb_end_pagefault(inode->i_sb); in __xfs_filemap_fault()
1404 return (vmf->flags & FAULT_FLAG_WRITE) && in xfs_is_write_fault()
1405 (vmf->vma->vm_flags & VM_SHARED); in xfs_is_write_fault()
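The test above classifies a fault as a write fault only when the write goes through a shared file mapping; a private mapping COWs into anonymous memory and never dirties the file. Userspace illustration (fd is assumed to be an open, non-empty file):

#include <sys/mman.h>

void demo(int fd)
{
	char *shared = mmap(0, 4096, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);
	char *private = mmap(0, 4096, PROT_READ | PROT_WRITE,
			     MAP_PRIVATE, fd, 0);

	shared[0] = 'x';	/* write fault: dirties the file page */
	private[0] = 'x';	/* COW fault: the file stays clean */
}
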
1414 IS_DAX(file_inode(vmf->vma->vm_file)) && in xfs_filemap_fault()
1423 if (!IS_DAX(file_inode(vmf->vma->vm_file))) in xfs_filemap_huge_fault()
1468 * We don't support synchronous mappings for non-DAX files and in xfs_file_mmap()
1471 if (!daxdev_mapping_supported(vma, target->bt_daxdev)) in xfs_file_mmap()
1472 return -EOPNOTSUPP; in xfs_file_mmap()
1475 vma->vm_ops = &xfs_file_vm_ops; in xfs_file_mmap()
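The daxdev_mapping_supported() test above is what makes MAP_SYNC fail cleanly on non-DAX setups. From userspace the flag must be paired with MAP_SHARED_VALIDATE, and mmap() fails with EOPNOTSUPP when the mapping cannot honour synchronous faults:

#define _GNU_SOURCE
#include <sys/mman.h>

void *map_sync(int fd, size_t len)
{
	/* EOPNOTSUPP unless the file supports DAX */
	return mmap(0, len, PROT_READ | PROT_WRITE,
		    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
}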