Lines matching "no-unaligned-direct-access" (full-word terms: no, unaligned, direct, access) in fs/xfs/xfs_file.c

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
33 #include <linux/backing-dev.h>
56 return !((pos | len) & (alloc_unit - 1)); in xfs_is_falloc_aligned()
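The test on line 56 is the classic power-of-two alignment check: pos and len are aligned to alloc_unit exactly when neither has any bit set below the unit size. A minimal standalone sketch of that check, assuming alloc_unit is a power of two (the real helper also has a separate path for non-power-of-two realtime allocation units):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Aligned iff neither pos nor len has any bits set below alloc_unit. */
    static bool is_falloc_aligned(uint64_t pos, uint64_t len, uint64_t alloc_unit)
    {
        return !((pos | len) & (alloc_unit - 1));
    }

    int main(void)
    {
        assert(is_falloc_aligned(8192, 4096, 4096));  /* both 4k multiples */
        assert(!is_falloc_aligned(8192, 512, 4096));  /* unaligned length */
        return 0;
    }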
61 * as there is no file data to flush, and thus also no need for explicit
62 * cache flush operations, and there are no non-transaction metadata updates
72 struct xfs_inode *ip = XFS_I(file->f_mapping->host); in xfs_dir_fsync()
85 if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP)) in xfs_fsync_seq()
87 return ip->i_itemp->ili_commit_seq; in xfs_fsync_seq()
115 error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, in xfs_fsync_flush_log()
118 spin_lock(&ip->i_itemp->ili_lock); in xfs_fsync_flush_log()
119 ip->i_itemp->ili_fsync_fields = 0; in xfs_fsync_flush_log()
120 spin_unlock(&ip->i_itemp->ili_lock); in xfs_fsync_flush_log()
133 struct xfs_inode *ip = XFS_I(file->f_mapping->host); in xfs_file_fsync()
134 struct xfs_mount *mp = ip->i_mount; in xfs_file_fsync()
145 return -EIO; in xfs_file_fsync()
155 if (XFS_IS_REALTIME_INODE(ip) && mp->m_rtdev_targp != mp->m_ddev_targp) in xfs_file_fsync()
156 error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev); in xfs_file_fsync()
157 else if (mp->m_logdev_targp != mp->m_ddev_targp) in xfs_file_fsync()
158 error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev); in xfs_file_fsync()
174 * a no-op we might have to flush the data device cache here. in xfs_file_fsync()
180 mp->m_logdev_targp == mp->m_ddev_targp) { in xfs_file_fsync()
181 err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev); in xfs_file_fsync()
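Lines 155-158 and 180-181 choose which block device cache to flush around the log force: the realtime device when a realtime inode lives on a separate device, otherwise the data device when the log is external, and the data device again afterwards if the log force did not already flush it. A hedged userspace sketch of the first of those decisions; the device handles and flush_cache() are placeholders, not the kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev { const char *name; };

    /* stand-in for blkdev_issue_flush() */
    static void flush_cache(const struct dev *d) { printf("flush %s\n", d->name); }

    /* Mirrors the pre-log-force flush choice shown at lines 155-158. */
    static void preflush(bool rt_inode, const struct dev *rtdev,
                         const struct dev *logdev, const struct dev *ddev)
    {
        if (rt_inode && rtdev != ddev)
            flush_cache(rtdev);   /* file data lives on the RT device */
        else if (logdev != ddev)
            flush_cache(ddev);    /* external log: flush the data device now */
        /* internal log: the later log force's flush covers the data device */
    }

    int main(void)
    {
        struct dev data = { "data" }, log = { "log" }, rt = { "rt" };

        preflush(false, &rt, &log, &data);   /* external log -> flushes "data" */
        preflush(false, &rt, &data, &data);  /* internal log -> nothing here */
        return 0;
    }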
194 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_ilock_iocb()
196 if (iocb->ki_flags & IOCB_NOWAIT) { in xfs_ilock_iocb()
198 return -EAGAIN; in xfs_ilock_iocb()
212 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_ilock_iocb_for_write()
237 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_file_dio_read()
245 file_accessed(iocb->ki_filp); in xfs_file_dio_read()
261 struct xfs_inode *ip = XFS_I(iocb->ki_filp->f_mapping->host); in xfs_file_dax_read()
275 file_accessed(iocb->ki_filp); in xfs_file_dax_read()
284 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_file_buffered_read()
303 struct inode *inode = file_inode(iocb->ki_filp); in xfs_file_read_iter()
304 struct xfs_mount *mp = XFS_I(inode)->i_mount; in xfs_file_read_iter()
310 return -EIO; in xfs_file_read_iter()
314 else if (iocb->ki_flags & IOCB_DIRECT) in xfs_file_read_iter()
334 struct xfs_mount *mp = ip->i_mount; in xfs_file_splice_read()
340 return -EIO; in xfs_file_splice_read()
353 * Take care of zeroing post-EOF blocks when they might exist.
368 struct xfs_inode *ip = XFS_I(iocb->ki_filp->f_mapping->host); in xfs_file_write_zero_eof()
381 spin_lock(&ip->i_flags_lock); in xfs_file_write_zero_eof()
383 if (iocb->ki_pos <= isize) { in xfs_file_write_zero_eof()
384 spin_unlock(&ip->i_flags_lock); in xfs_file_write_zero_eof()
387 spin_unlock(&ip->i_flags_lock); in xfs_file_write_zero_eof()
389 if (iocb->ki_flags & IOCB_NOWAIT) in xfs_file_write_zero_eof()
390 return -EAGAIN; in xfs_file_write_zero_eof()
408 * wait for all of them to drain. Non-AIO DIO will have drained in xfs_file_write_zero_eof()
410 * cases this wait is a no-op. in xfs_file_write_zero_eof()
417 trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize); in xfs_file_write_zero_eof()
420 error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, ac, NULL); in xfs_file_write_zero_eof()
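Lines 383-420 implement the post-EOF zeroing rule for writes that start beyond the current in-core size: nothing to do when ki_pos is at or below i_size, otherwise the byte range [i_size, ki_pos) must be zeroed before the write so stale blocks are never exposed. A small sketch of just that arithmetic (the locking, NOWAIT bail-out and I/O draining shown above are omitted):

    #include <assert.h>
    #include <stdint.h>

    /*
     * Bytes that must be zeroed between the current EOF (isize) and the start
     * of the new write (pos); 0 means the write does not leave a gap.
     */
    static uint64_t eof_zero_len(uint64_t isize, uint64_t pos)
    {
        return pos > isize ? pos - isize : 0;
    }

    int main(void)
    {
        assert(eof_zero_len(1000, 1000) == 0);    /* write starts at EOF */
        assert(eof_zero_len(1000, 4096) == 3096); /* gap [1000, 4096) is zeroed */
        return 0;
    }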
427 * Common pre-write limit and setup checks.
431 * if called for a direct write beyond i_size.
440 struct inode *inode = iocb->ki_filp->f_mapping->host; in xfs_file_write_checks()
450 if (iocb->ki_flags & IOCB_NOWAIT) { in xfs_file_write_checks()
452 if (error == -EWOULDBLOCK) in xfs_file_write_checks()
453 error = -EAGAIN; in xfs_file_write_checks()
486 if (iocb->ki_pos > i_size_read(inode)) { in xfs_file_write_checks()
509 if (iocb->ki_flags & IOCB_NOWAIT) in xfs_zoned_write_space_reserve()
513 * Check the rlimit and LFS boundary first so that we don't over-reserve in xfs_zoned_write_space_reserve()
522 error = generic_write_check_limits(iocb->ki_filp, iocb->ki_pos, &count); in xfs_zoned_write_space_reserve()
532 * EOF block and the new start block if they are unaligned. in xfs_zoned_write_space_reserve()
537 XFS_B_TO_FSB(ip->i_mount, count) + 1 + 2, flags, ac); in xfs_zoned_write_space_reserve()
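Line 537 reserves XFS_B_TO_FSB(count) + 1 + 2 filesystem blocks, where the extra blocks cover the EOF block and the new start block when they are unaligned (per the comment on line 532). A hedged worked example of the rounding and the slack, assuming a 4096-byte block size; the helper names here are local, not the kernel macros:

    #include <assert.h>
    #include <stdint.h>

    #define FSB_SIZE 4096u  /* assumed filesystem block size */

    /* round a byte count up to whole filesystem blocks (XFS_B_TO_FSB-like) */
    static uint64_t bytes_to_fsb(uint64_t bytes)
    {
        return (bytes + FSB_SIZE - 1) / FSB_SIZE;
    }

    /* blocks reserved for a zoned write: payload plus slack for unaligned edges */
    static uint64_t zoned_write_reservation(uint64_t count)
    {
        return bytes_to_fsb(count) + 1 + 2;
    }

    int main(void)
    {
        /* a 10000-byte write spans 3 blocks; 3 more are reserved as slack */
        assert(zoned_write_reservation(10000) == 6);
        return 0;
    }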
547 struct inode *inode = file_inode(iocb->ki_filp); in xfs_dio_write_end_io()
549 loff_t offset = iocb->ki_pos; in xfs_dio_write_end_io()
557 if (xfs_is_shutdown(ip->i_mount)) in xfs_dio_write_end_io()
558 return -EIO; in xfs_dio_write_end_io()
569 XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size); in xfs_dio_write_end_io()
574 * task-wide nofs context for the following operations. in xfs_dio_write_end_io()
585 * Unwritten conversion updates the in-core isize after extent in xfs_dio_write_end_io()
586 * conversion but before updating the on-disk size. Updating isize any in xfs_dio_write_end_io()
596 * We need to update the in-core inode size here so that we don't end up in xfs_dio_write_end_io()
597 * with the on-disk inode size being outside the in-core inode size. We in xfs_dio_write_end_io()
598 * have no other method of updating EOF for AIO, so always do it here in xfs_dio_write_end_io()
616 spin_lock(&ip->i_flags_lock); in xfs_dio_write_end_io()
619 spin_unlock(&ip->i_flags_lock); in xfs_dio_write_end_io()
622 spin_unlock(&ip->i_flags_lock); in xfs_dio_write_end_io()
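Lines 596-622 grow the in-core inode size after a completed direct write using a check-lock-recheck pattern: compute the new EOF, take i_flags_lock, and only store it if it is still larger than what another completion may have set. A sketch of the same shape with a plain pthread mutex standing in for the spinlock; the struct is illustrative only:

    #include <pthread.h>
    #include <stdint.h>

    struct demo_inode {
        pthread_mutex_t lock;  /* stands in for i_flags_lock */
        uint64_t size;         /* stands in for the in-core i_size */
    };

    /* Grow the cached size to cover [offset, offset + len); never shrink it. */
    static void extend_isize(struct demo_inode *ip, uint64_t offset, uint64_t len)
    {
        uint64_t end = offset + len;

        if (end <= ip->size)           /* cheap unlocked check */
            return;

        pthread_mutex_lock(&ip->lock);
        if (end > ip->size)            /* re-check: a racing update may have won */
            ip->size = end;
        pthread_mutex_unlock(&ip->lock);
    }

    int main(void)
    {
        struct demo_inode ip = { PTHREAD_MUTEX_INITIALIZER, 4096 };

        extend_isize(&ip, 4096, 8192);
        return ip.size == 12288 ? 0 : 1;
    }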
640 struct xfs_mount *mp = XFS_I(iter->inode)->i_mount; in xfs_dio_zoned_submit_io()
641 struct xfs_zone_alloc_ctx *ac = iter->private; in xfs_dio_zoned_submit_io()
645 count_fsb = XFS_B_TO_FSB(mp, bio->bi_iter.bi_size); in xfs_dio_zoned_submit_io()
646 if (count_fsb > ac->reserved_blocks) { in xfs_dio_zoned_submit_io()
649 count_fsb, ac->reserved_blocks); in xfs_dio_zoned_submit_io()
654 ac->reserved_blocks -= count_fsb; in xfs_dio_zoned_submit_io()
656 bio->bi_end_io = xfs_end_bio; in xfs_dio_zoned_submit_io()
657 ioend = iomap_init_ioend(iter->inode, bio, file_offset, in xfs_dio_zoned_submit_io()
659 xfs_zone_alloc_and_submit(ioend, &ac->open_zone); in xfs_dio_zoned_submit_io()
669 * Handle block aligned direct I/O writes.
707 * Handle block aligned direct I/O writes to zoned devices.
729 * Handle block unaligned direct I/O writes
731 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
732 * them to be done in parallel with reads and other direct I/O writes. However,
733 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
734 * to do sub-block zeroing and that requires serialisation against other direct
736 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
737 * In the case where sub-block zeroing is not required, we can do concurrent
738 * sub-block dios to the same block successfully.
741 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
758 * Extending writes need exclusivity because of the sub-block zeroing in xfs_file_dio_write_unaligned()
762 if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) { in xfs_file_dio_write_unaligned()
763 if (iocb->ki_flags & IOCB_NOWAIT) in xfs_file_dio_write_unaligned()
764 return -EAGAIN; in xfs_file_dio_write_unaligned()
775 * We can't properly handle unaligned direct I/O to reflink files yet, in xfs_file_dio_write_unaligned()
780 ret = -ENOTBLK; in xfs_file_dio_write_unaligned()
789 * If we are doing exclusive unaligned I/O, this must be the only I/O in xfs_file_dio_write_unaligned()
790 * in-flight. Otherwise we risk data corruption due to unwritten extent in xfs_file_dio_write_unaligned()
802 * Retry unaligned I/O with exclusive blocking semantics if the DIO in xfs_file_dio_write_unaligned()
806 if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) { in xfs_file_dio_write_unaligned()
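The comment at lines 729-741 and the body at 758-806 describe the strategy for unaligned direct writes: attempt the I/O under the shared lock with an overwrite-only hint so the lower layer bails out with -EAGAIN if sub-block zeroing would be needed, then retry once under the exclusive lock unless the caller asked for IOCB_NOWAIT. A hedged sketch of that optimistic-then-exclusive shape; do_write() and the lock-mode enum are invented for illustration and gloss over the extending-write and reflink cases shown above:

    #include <errno.h>
    #include <stdbool.h>

    enum lock_mode { LOCK_SHARED, LOCK_EXCL };

    /*
     * Trivial stand-in for the real I/O path: the shared, overwrite-only fast
     * path "bounces" with -EAGAIN whenever sub-block zeroing would be needed.
     */
    static int do_write(enum lock_mode mode, bool overwrite_only)
    {
        (void)mode;
        return overwrite_only ? -EAGAIN : 0;
    }

    static int unaligned_write(bool nowait)
    {
        /* optimistic pass: shared lock, refuse anything that needs zeroing */
        int ret = do_write(LOCK_SHARED, true);

        /* slow path: take the exclusive lock so sub-block zeroing is serialised */
        if (ret == -EAGAIN && !nowait)
            ret = do_write(LOCK_EXCL, false);
        return ret;
    }

    int main(void)
    {
        return unaligned_write(false);  /* falls back and succeeds -> 0 */
    }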
823 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_file_dio_write()
827 /* direct I/O must be aligned to device logical sector size */ in xfs_file_dio_write()
828 if ((iocb->ki_pos | count) & target->bt_logical_sectormask) in xfs_file_dio_write()
829 return -EINVAL; in xfs_file_dio_write()
837 if (((iocb->ki_pos | count) & ip->i_mount->m_blockmask) || in xfs_file_dio_write()
839 (iov_iter_alignment(from) & ip->i_mount->m_blockmask))) in xfs_file_dio_write()
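Lines 827-839 route a direct write: position and count must at least be aligned to the device's logical sector size, and the write only stays on the aligned fast path when position, count and (in some configurations) the user buffer are all filesystem-block aligned. A hedged sketch of that classification with made-up mask values for 512-byte sectors and 4096-byte blocks:

    #include <assert.h>
    #include <stdint.h>

    #define SECTOR_MASK 511u   /* assumed 512-byte logical sectors */
    #define BLOCK_MASK  4095u  /* assumed 4096-byte filesystem blocks */

    enum dio_path { DIO_INVALID, DIO_UNALIGNED, DIO_ALIGNED };

    static enum dio_path classify_dio(uint64_t pos, uint64_t count,
                                      uintptr_t buf_align)
    {
        if ((pos | count) & SECTOR_MASK)
            return DIO_INVALID;      /* -EINVAL in the listing */
        if (((pos | count) & BLOCK_MASK) || (buf_align & BLOCK_MASK))
            return DIO_UNALIGNED;    /* takes the serialised path */
        return DIO_ALIGNED;
    }

    int main(void)
    {
        assert(classify_dio(4096, 4096, 0) == DIO_ALIGNED);
        assert(classify_dio(4096, 512, 0) == DIO_UNALIGNED);
        assert(classify_dio(100, 512, 0) == DIO_INVALID);
        return 0;
    }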
852 struct inode *inode = iocb->ki_filp->f_mapping->host; in xfs_file_dax_write()
865 pos = iocb->ki_pos; in xfs_file_dax_write()
869 if (ret > 0 && iocb->ki_pos > i_size_read(inode)) { in xfs_file_dax_write()
870 i_size_write(inode, iocb->ki_pos); in xfs_file_dax_write()
880 XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret); in xfs_file_dax_write()
882 /* Handle various SYNC-type writes */ in xfs_file_dax_write()
893 struct inode *inode = iocb->ki_filp->f_mapping->host; in xfs_file_buffered_write()
923 if (ret == -EDQUOT && !cleared_space) { in xfs_file_buffered_write()
928 } else if (ret == -ENOSPC && !cleared_space) { in xfs_file_buffered_write()
932 xfs_flush_inodes(ip->i_mount); in xfs_file_buffered_write()
936 xfs_blockgc_free_space(ip->i_mount, &icw); in xfs_file_buffered_write()
945 XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret); in xfs_file_buffered_write()
946 /* Handle various SYNC-type writes */ in xfs_file_buffered_write()
957 struct xfs_inode *ip = XFS_I(iocb->ki_filp->f_mapping->host); in xfs_file_buffered_write_zoned()
958 struct xfs_mount *mp = ip->i_mount; in xfs_file_buffered_write_zoned()
983 XFS_FSB_TO_B(mp, ac.reserved_blocks) - in xfs_file_buffered_write_zoned()
984 (iocb->ki_pos & mp->m_blockmask)); in xfs_file_buffered_write_zoned()
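Lines 983-984 cap the buffered write so it cannot consume more than the reserved blocks: the byte budget is the reservation converted to bytes minus the part of the first block already used by the unaligned start offset, and the write count is then clamped to that budget. A small worked example of the cap, assuming 4096-byte blocks:

    #include <assert.h>
    #include <stdint.h>

    #define BLOCK_SIZE 4096u
    #define BLOCK_MASK (BLOCK_SIZE - 1)

    /* Largest write, in bytes, that fits in reserved_blocks starting at pos. */
    static uint64_t writable_bytes(uint64_t reserved_blocks, uint64_t pos)
    {
        return reserved_blocks * BLOCK_SIZE - (pos & BLOCK_MASK);
    }

    int main(void)
    {
        /* 3 reserved blocks, write starting 100 bytes into a block */
        assert(writable_bytes(3, 8292) == 3 * 4096 - 100);
        return 0;
    }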
992 if (ret == -ENOSPC && !cleared_space) { in xfs_file_buffered_write_zoned()
1018 struct inode *inode = iocb->ki_filp->f_mapping->host; in xfs_file_write_iter()
1023 XFS_STATS_INC(ip->i_mount, xs_write_calls); in xfs_file_write_iter()
1028 if (xfs_is_shutdown(ip->i_mount)) in xfs_file_write_iter()
1029 return -EIO; in xfs_file_write_iter()
1034 if (iocb->ki_flags & IOCB_ATOMIC) { in xfs_file_write_iter()
1038 * a FS block, but there is no requirement to support this. in xfs_file_write_iter()
1041 if (ocount != ip->i_mount->m_sb.sb_blocksize) in xfs_file_write_iter()
1042 return -EINVAL; in xfs_file_write_iter()
1048 if (iocb->ki_flags & IOCB_DIRECT) { in xfs_file_write_iter()
1056 if (ret != -ENOTBLK) in xfs_file_write_iter()
1070 if (xfs_has_wsync(ip->i_mount)) in xfs_file_sync_writes()
1072 if (filp->f_flags & (__O_SYNC | O_DSYNC)) in xfs_file_sync_writes()
1120 loff_t new_size = i_size_read(inode) - len; in xfs_falloc_collapse_range()
1124 return -EINVAL; in xfs_falloc_collapse_range()
1127 * There is no need to overlap collapse range with EOF, in which case it in xfs_falloc_collapse_range()
1131 return -EINVAL; in xfs_falloc_collapse_range()
1150 return -EINVAL; in xfs_falloc_insert_range()
1153 * New inode size must not exceed ->s_maxbytes, accounting for in xfs_falloc_insert_range()
1156 if (inode->i_sb->s_maxbytes - isize < len) in xfs_falloc_insert_range()
1157 return -EFBIG; in xfs_falloc_insert_range()
1161 return -EINVAL; in xfs_falloc_insert_range()
1170 * past EOF and hence losing access to the data that is contained within in xfs_falloc_insert_range()
1181 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued by
1207 len = round_up(offset + len, blksize) - round_down(offset, blksize); in xfs_falloc_zero_range()
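Line 1207 widens a zero-range request to whole blocks by rounding the end up and the start down to the block size and taking the difference. A worked example of that rounding, assuming 4096-byte blocks:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t round_down_u64(uint64_t x, uint64_t align)
    {
        return x - (x % align);
    }

    static uint64_t round_up_u64(uint64_t x, uint64_t align)
    {
        return round_down_u64(x + align - 1, align);
    }

    int main(void)
    {
        uint64_t blksize = 4096, offset = 5000, len = 3000;

        /* the byte range [5000, 8000) widens to the block range [4096, 8192) */
        uint64_t wide = round_up_u64(offset + len, blksize) -
                        round_down_u64(offset, blksize);
        assert(wide == 4096);
        return 0;
    }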
1256 return -EOPNOTSUPP; in xfs_falloc_allocate_range()
1296 * require the in-memory size to be fully up-to-date. in __xfs_file_fallocate()
1324 error = -EOPNOTSUPP; in __xfs_file_fallocate()
1364 if (!S_ISREG(inode->i_mode)) in xfs_file_fallocate()
1365 return -EINVAL; in xfs_file_fallocate()
1367 return -EOPNOTSUPP; in xfs_file_fallocate()
1422 struct xfs_mount *mp = src->i_mount; in xfs_file_remap_range()
1428 return -EINVAL; in xfs_file_remap_range()
1431 return -EOPNOTSUPP; in xfs_file_remap_range()
1434 return -EIO; in xfs_file_remap_range()
1456 (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) && in xfs_file_remap_range()
1458 !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)) in xfs_file_remap_range()
1459 cowextsize = src->i_cowextsize; in xfs_file_remap_range()
1474 * handle partial results -- either the whole remap succeeds, or we in xfs_file_remap_range()
1488 if (xfs_is_shutdown(XFS_M(inode->i_sb))) in xfs_file_open()
1489 return -EIO; in xfs_file_open()
1490 file->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT; in xfs_file_open()
1492 file->f_mode |= FMODE_CAN_ATOMIC_WRITE; in xfs_file_open()
1505 if (xfs_is_shutdown(ip->i_mount)) in xfs_dir_open()
1506 return -EIO; in xfs_dir_open()
1512 * If there are any blocks, read-ahead block 0 as we're almost in xfs_dir_open()
1516 if (ip->i_df.if_nextents > 0) in xfs_dir_open()
1532 struct xfs_mount *mp = ip->i_mount; in xfs_file_release()
1535 * If this is a read-only mount or the file system has been shut down, in xfs_file_release()
1545 * is particularly noticeable from a truncate down, buffered (re-)write in xfs_file_release()
1552 if (ip->i_delayed_blks > 0) in xfs_file_release()
1553 filemap_flush(inode->i_mapping); in xfs_file_release()
1557 * XFS aggressively preallocates post-EOF space to generate contiguous in xfs_file_release()
1567 * This heuristic is skipped for inodes with the append-only flag as in xfs_file_release()
1570 * There is no point in freeing blocks here for open but unlinked files in xfs_file_release()
1573 * When releasing a read-only context, don't flush data or trim post-EOF in xfs_file_release()
1580 if (!inode->i_nlink || in xfs_file_release()
1581 !(file->f_mode & FMODE_WRITE) || in xfs_file_release()
1582 (ip->i_diflags & XFS_DIFLAG_APPEND) || in xfs_file_release()
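Lines 1552-1582 sketch the ->release() heuristic: flush delayed allocations if any are outstanding, but skip trimming post-EOF preallocation when the inode is unlinked, the descriptor was never writable, or the append-only flag is set (among other conditions not shown in this listing). A hedged restatement of that predicate as a standalone boolean; the field names are illustrative:

    #include <stdbool.h>

    struct release_ctx {
        bool has_nlink;     /* inode still linked in the namespace */
        bool opened_write;  /* this file descriptor had FMODE_WRITE */
        bool append_only;   /* an XFS_DIFLAG_APPEND-style flag */
    };

    /* True when trimming post-EOF preallocation is worth attempting at all. */
    static bool want_eof_trim(const struct release_ctx *c)
    {
        return c->has_nlink && c->opened_write && !c->append_only;
    }

    int main(void)
    {
        struct release_ctx read_only_fd = { true, false, false };

        return want_eof_trim(&read_only_fd) ? 1 : 0;  /* expect 0: skip */
    }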
1621 * point we can change the ->readdir prototype to include the in xfs_file_readdir()
1624 bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size); in xfs_file_readdir()
1635 struct inode *inode = file->f_mapping->host; in xfs_file_llseek()
1637 if (xfs_is_shutdown(XFS_I(inode)->i_mount)) in xfs_file_llseek()
1638 return -EIO; in xfs_file_llseek()
1653 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes); in xfs_file_llseek()
1670 (write_fault && !vmf->cow_page) ? in xfs_dax_fault_locked()
1683 struct xfs_inode *ip = XFS_I(file_inode(vmf->vma->vm_file)); in xfs_dax_read_fault()
1701 * invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
1703 * i_lock (XFS - extent map serialisation)
1711 struct inode *inode = file_inode(vmf->vma->vm_file); in __xfs_write_fault()
1718 sb_start_pagefault(inode->i_sb); in __xfs_write_fault()
1719 file_update_time(vmf->vma->vm_file); in __xfs_write_fault()
1740 sb_end_pagefault(inode->i_sb); in __xfs_write_fault()
1749 struct xfs_inode *ip = XFS_I(file_inode(vmf->vma->vm_file)); in xfs_write_fault_zoned()
1750 unsigned int len = folio_size(page_folio(vmf->page)); in xfs_write_fault_zoned()
1756 * This could over-allocate as it doesn't check for truncation. in xfs_write_fault_zoned()
1761 error = xfs_zoned_space_reserve(ip, XFS_B_TO_FSB(ip->i_mount, len), 0, in xfs_write_fault_zoned()
1775 if (xfs_is_zoned_inode(XFS_I(file_inode(vmf->vma->vm_file)))) in xfs_write_fault()
1784 return (vmf->flags & FAULT_FLAG_WRITE) && in xfs_is_write_fault()
1785 (vmf->vma->vm_flags & VM_SHARED); in xfs_is_write_fault()
1792 struct inode *inode = file_inode(vmf->vma->vm_file); in xfs_filemap_fault()
1810 if (!IS_DAX(file_inode(vmf->vma->vm_file))) in xfs_filemap_huge_fault()
1855 * We don't support synchronous mappings for non-DAX files and in xfs_file_mmap()
1858 if (!daxdev_mapping_supported(vma, target->bt_daxdev)) in xfs_file_mmap()
1859 return -EOPNOTSUPP; in xfs_file_mmap()
1862 vma->vm_ops = &xfs_file_vm_ops; in xfs_file_mmap()