// SPDX-License-Identifier: GPL-2.0-only
/*
 * file.c
 *
 * PURPOSE
 *	File handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	(C) 1998-1999 Dave Boynton
 *	(C) 1998-2004 Ben Fennema
 *	(C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  10/02/98 dgb	Attempt to integrate into udf.o
 *  10/07/98		Switched to using generic_readpage, etc., like isofs
 *			And it works!
 *  12/06/98 blf	Added udf_file_read. uses generic_file_read for all cases but
 *			ICBTAG_FLAG_AD_IN_ICB.
 *  04/06/99		64 bit file handling on 32 bit systems taken from ext2 file.c
 *  05/12/99		Preliminary file write support
 */
23
24 #include "udfdecl.h"
25 #include <linux/fs.h>
26 #include <linux/uaccess.h>
27 #include <linux/kernel.h>
28 #include <linux/string.h> /* memset */
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/filelock.h>
32 #include <linux/pagemap.h>
33 #include <linux/uio.h>
34
35 #include "udf_i.h"
36 #include "udf_sb.h"
37
/*
 * Make a faulted page writable: allocate on-disk blocks backing the page
 * (unless the file's data lives inside the ICB, where space already exists)
 * and mark the folio dirty.
 *
 * Returns VM_FAULT_LOCKED with the folio still locked on success,
 * VM_FAULT_NOPAGE if the folio was truncated/remapped under us (fault is
 * retried), or an error code translated via vmf_fs_error().
 */
static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio = page_folio(vmf->page);
	loff_t size;
	unsigned int end;
	vm_fault_t ret = VM_FAULT_LOCKED;
	int err;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	/* Keep truncate / hole punch away while we instantiate blocks */
	filemap_invalidate_lock_shared(mapping);
	folio_lock(folio);
	size = i_size_read(inode);
	/* Folio truncated or moved to another mapping? Let the fault retry. */
	if (folio->mapping != inode->i_mapping || folio_pos(folio) >= size) {
		folio_unlock(folio);
		ret = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	/* Space is already allocated for in-ICB file */
	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		goto out_dirty;
	/* For the folio containing EOF, only allocate up to i_size */
	if (folio->index == size >> PAGE_SHIFT)
		end = size & ~PAGE_MASK;
	else
		end = PAGE_SIZE;
	err = __block_write_begin(folio, 0, end, udf_get_block);
	if (err) {
		folio_unlock(folio);
		ret = vmf_fs_error(err);
		goto out_unlock;
	}

	block_commit_write(folio, 0, end);
out_dirty:
	folio_mark_dirty(folio);
	/* Wait for any running writeback so the page contents stay stable */
	folio_wait_stable(folio);
out_unlock:
	filemap_invalidate_unlock_shared(mapping);
	sb_end_pagefault(inode->i_sb);
	return ret;
}
82
/* mmap operations: generic read faults, custom write-fault block allocation */
static const struct vm_operations_struct udf_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = udf_page_mkwrite,
};
88
/*
 * Write data to a UDF file.  If the file's data is stored inside the ICB
 * and the write would overflow the in-ICB area, the file is first expanded
 * into normal allocated extents; otherwise the generic write path is used.
 *
 * Returns the number of bytes written or a negative error.
 */
static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t retval;
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct udf_inode_info *iinfo = UDF_I(inode);

	inode_lock(inode);

	retval = generic_write_checks(iocb, from);
	if (retval <= 0)
		goto out;

	/*
	 * In-ICB data would no longer fit in the inode block after this
	 * write: convert the file to use regular extents first.
	 */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
	    inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
				 iocb->ki_pos + iov_iter_count(from))) {
		filemap_invalidate_lock(inode->i_mapping);
		retval = udf_expand_file_adinicb(inode);
		filemap_invalidate_unlock(inode->i_mapping);
		if (retval)
			goto out;
	}

	retval = __generic_file_write_iter(iocb, from);
out:
	/* For in-ICB files the allocated length tracks i_size exactly */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && retval > 0) {
		down_write(&iinfo->i_data_sem);
		iinfo->i_lenAlloc = inode->i_size;
		up_write(&iinfo->i_data_sem);
	}
	inode_unlock(inode);

	if (retval > 0) {
		mark_inode_dirty(inode);
		retval = generic_write_sync(iocb, retval);
	}

	return retval;
}
128
udf_ioctl(struct file * filp,unsigned int cmd,unsigned long arg)129 long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
130 {
131 struct inode *inode = file_inode(filp);
132 long old_block, new_block;
133 int result;
134
135 if (file_permission(filp, MAY_READ) != 0) {
136 udf_debug("no permission to access inode %llu\n", inode->i_ino);
137 return -EPERM;
138 }
139
140 if (!arg && ((cmd == UDF_GETVOLIDENT) || (cmd == UDF_GETEASIZE) ||
141 (cmd == UDF_RELOCATE_BLOCKS) || (cmd == UDF_GETEABLOCK))) {
142 udf_debug("invalid argument to udf_ioctl\n");
143 return -EINVAL;
144 }
145
146 switch (cmd) {
147 case UDF_GETVOLIDENT:
148 if (copy_to_user((char __user *)arg,
149 UDF_SB(inode->i_sb)->s_volume_ident, 32))
150 return -EFAULT;
151 return 0;
152 case UDF_RELOCATE_BLOCKS:
153 if (!capable(CAP_SYS_ADMIN))
154 return -EPERM;
155 if (get_user(old_block, (long __user *)arg))
156 return -EFAULT;
157 result = udf_relocate_blocks(inode->i_sb,
158 old_block, &new_block);
159 if (result == 0)
160 result = put_user(new_block, (long __user *)arg);
161 return result;
162 case UDF_GETEASIZE:
163 return put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg);
164 case UDF_GETEABLOCK:
165 return copy_to_user((char __user *)arg,
166 UDF_I(inode)->i_data,
167 UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0;
168 default:
169 return -ENOIOCTLCMD;
170 }
171
172 return 0;
173 }
174
udf_release_file(struct inode * inode,struct file * filp)175 static int udf_release_file(struct inode *inode, struct file *filp)
176 {
177 if (filp->f_mode & FMODE_WRITE &&
178 atomic_read(&inode->i_writecount) == 1) {
179 /*
180 * Grab i_mutex to avoid races with writes changing i_size
181 * while we are running.
182 */
183 inode_lock(inode);
184 down_write(&UDF_I(inode)->i_data_sem);
185 udf_discard_prealloc(inode);
186 udf_truncate_tail_extent(inode);
187 up_write(&UDF_I(inode)->i_data_sem);
188 inode_unlock(inode);
189 }
190 return 0;
191 }
192
udf_file_mmap(struct file * file,struct vm_area_struct * vma)193 static int udf_file_mmap(struct file *file, struct vm_area_struct *vma)
194 {
195 file_accessed(file);
196 vma->vm_ops = &udf_file_vm_ops;
197
198 return 0;
199 }
200
/* Sync a file's data range plus its cached metadata buffer heads. */
int udf_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;

	return mmb_fsync(file, &UDF_I(inode)->i_metadata_bhs,
			 start, end, datasync);
}
207
/* File operations for regular UDF files */
const struct file_operations udf_file_operations = {
	.read_iter = generic_file_read_iter,
	.unlocked_ioctl = udf_ioctl,
	.open = generic_file_open,
	.mmap = udf_file_mmap,
	.write_iter = udf_file_write_iter,
	.release = udf_release_file,
	.fsync = udf_fsync,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = generic_file_llseek,
	.setlease = generic_setlease,
};
221
udf_setattr(struct mnt_idmap * idmap,struct dentry * dentry,struct iattr * attr)222 static int udf_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
223 struct iattr *attr)
224 {
225 struct inode *inode = d_inode(dentry);
226 struct super_block *sb = inode->i_sb;
227 int error;
228
229 error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
230 if (error)
231 return error;
232
233 if ((attr->ia_valid & ATTR_UID) &&
234 UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET) &&
235 !uid_eq(attr->ia_uid, UDF_SB(sb)->s_uid))
236 return -EPERM;
237 if ((attr->ia_valid & ATTR_GID) &&
238 UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET) &&
239 !gid_eq(attr->ia_gid, UDF_SB(sb)->s_gid))
240 return -EPERM;
241
242 if ((attr->ia_valid & ATTR_SIZE) &&
243 attr->ia_size != i_size_read(inode)) {
244 filemap_invalidate_lock(inode->i_mapping);
245 error = udf_setsize(inode, attr->ia_size);
246 filemap_invalidate_unlock(inode->i_mapping);
247 if (error)
248 return error;
249 }
250
251 if (attr->ia_valid & ATTR_MODE)
252 udf_update_extra_perms(inode, attr->ia_mode);
253
254 setattr_copy(&nop_mnt_idmap, inode, attr);
255 mark_inode_dirty(inode);
256 return 0;
257 }
258
/* Inode operations for regular UDF files */
const struct inode_operations udf_file_inode_operations = {
	.setattr = udf_setattr,
};
262