xref: /linux/fs/xfs/xfs_ioctl.c (revision 0b0128e64af056a7dd29fa3bc780af654e53f861)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs_platform.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_rtalloc.h"
15 #include "xfs_iwalk.h"
16 #include "xfs_itable.h"
17 #include "xfs_error.h"
18 #include "xfs_da_format.h"
19 #include "xfs_da_btree.h"
20 #include "xfs_attr.h"
21 #include "xfs_bmap.h"
22 #include "xfs_bmap_util.h"
23 #include "xfs_fsops.h"
24 #include "xfs_discard.h"
25 #include "xfs_quota.h"
26 #include "xfs_trace.h"
27 #include "xfs_icache.h"
28 #include "xfs_trans.h"
29 #include "xfs_btree.h"
30 #include <linux/fsmap.h>
31 #include "xfs_fsmap.h"
32 #include "scrub/xfs_scrub.h"
33 #include "xfs_sb.h"
34 #include "xfs_ag.h"
35 #include "xfs_health.h"
36 #include "xfs_reflink.h"
37 #include "xfs_ioctl.h"
38 #include "xfs_xattr.h"
39 #include "xfs_rtbitmap.h"
40 #include "xfs_rtrmap_btree.h"
41 #include "xfs_file.h"
42 #include "xfs_exchrange.h"
43 #include "xfs_handle.h"
44 #include "xfs_rtgroup.h"
45 #include "xfs_healthmon.h"
46 #include "xfs_verify_media.h"
47 #include "xfs_zone_priv.h"
48 #include "xfs_zone_alloc.h"
49 
50 #include <linux/mount.h>
51 #include <linux/fileattr.h>
52 
53 /* Return 0 on success or positive error */
54 int
xfs_fsbulkstat_one_fmt(struct xfs_ibulk * breq,const struct xfs_bulkstat * bstat)55 xfs_fsbulkstat_one_fmt(
56 	struct xfs_ibulk		*breq,
57 	const struct xfs_bulkstat	*bstat)
58 {
59 	struct xfs_bstat		bs1;
60 
61 	xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);
62 	if (copy_to_user(breq->ubuffer, &bs1, sizeof(bs1)))
63 		return -EFAULT;
64 	return xfs_ibulk_advance(breq, sizeof(struct xfs_bstat));
65 }
66 
67 int
xfs_fsinumbers_fmt(struct xfs_ibulk * breq,const struct xfs_inumbers * igrp)68 xfs_fsinumbers_fmt(
69 	struct xfs_ibulk		*breq,
70 	const struct xfs_inumbers	*igrp)
71 {
72 	struct xfs_inogrp		ig1;
73 
74 	xfs_inumbers_to_inogrp(&ig1, igrp);
75 	if (copy_to_user(breq->ubuffer, &ig1, sizeof(struct xfs_inogrp)))
76 		return -EFAULT;
77 	return xfs_ibulk_advance(breq, sizeof(struct xfs_inogrp));
78 }
79 
/*
 * Service the old XFS_IOC_FSBULKSTAT{,_SINGLE} and XFS_IOC_FSINUMBERS
 * ioctls by translating the caller's v1 request onto the v5 bulk request
 * back end.  Requires CAP_SYS_ADMIN; returns 0 or a negative errno.
 */
STATIC int
xfs_ioc_fsbulkstat(
	struct file		*file,
	unsigned int		cmd,
	void			__user *arg)
{
	struct xfs_mount	*mp = XFS_I(file_inode(file))->i_mount;
	struct xfs_fsop_bulkreq	bulkreq;
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.idmap		= file_mnt_idmap(file),
		.ocount		= 0,
	};
	xfs_ino_t		lastino;
	int			error;

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (copy_from_user(&bulkreq, arg, sizeof(struct xfs_fsop_bulkreq)))
		return -EFAULT;

	/* The v1 interface passes the inode cursor indirectly via lastip. */
	if (copy_from_user(&lastino, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if (bulkreq.icount <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	breq.ubuffer = bulkreq.ubuffer;
	breq.icount = bulkreq.icount;

	/*
	 * FSBULKSTAT_SINGLE expects that *lastip contains the inode number
	 * that we want to stat.  However, FSINUMBERS and FSBULKSTAT expect
	 * that *lastip contains either zero or the number of the last inode to
	 * be examined by the previous call and return results starting with
	 * the next inode after that.  The new bulk request back end functions
	 * take the inode to start with, so we have to compute the startino
	 * parameter from lastino to maintain correct function.  lastino == 0
	 * is a special case because it has traditionally meant "first inode
	 * in filesystem".
	 */
	if (cmd == XFS_IOC_FSINUMBERS) {
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_inumbers(&breq, xfs_fsinumbers_fmt);
		lastino = breq.startino - 1;
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) {
		breq.startino = lastino;
		breq.icount = 1;
		error = xfs_bulkstat_one(&breq, xfs_fsbulkstat_one_fmt);
	} else {	/* XFS_IOC_FSBULKSTAT */
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_bulkstat(&breq, xfs_fsbulkstat_one_fmt);
		lastino = breq.startino - 1;
	}

	if (error)
		return error;

	/* Hand the updated cursor and output count back to userspace. */
	if (bulkreq.lastip != NULL &&
	    copy_to_user(bulkreq.lastip, &lastino, sizeof(xfs_ino_t)))
		return -EFAULT;

	if (bulkreq.ocount != NULL &&
	    copy_to_user(bulkreq.ocount, &breq.ocount, sizeof(__s32)))
		return -EFAULT;

	return 0;
}
158 
159 /* Return 0 on success or positive error */
160 static int
xfs_bulkstat_fmt(struct xfs_ibulk * breq,const struct xfs_bulkstat * bstat)161 xfs_bulkstat_fmt(
162 	struct xfs_ibulk		*breq,
163 	const struct xfs_bulkstat	*bstat)
164 {
165 	if (copy_to_user(breq->ubuffer, bstat, sizeof(struct xfs_bulkstat)))
166 		return -EFAULT;
167 	return xfs_ibulk_advance(breq, sizeof(struct xfs_bulkstat));
168 }
169 
/*
 * Check the incoming bulk request @hdr from userspace and initialize the
 * internal @breq bulk request appropriately.  Returns 0 if the bulk request
 * should proceed; -ECANCELED if there's nothing to do; or the usual
 * negative error code.
 */
static int
xfs_bulk_ireq_setup(
	struct xfs_mount	*mp,
	const struct xfs_bulk_ireq *hdr,
	struct xfs_ibulk	*breq,
	void __user		*ubuffer)
{
	/* Reject zero-length requests, unknown flags, and nonzero padding. */
	if (hdr->icount == 0 ||
	    (hdr->flags & ~XFS_BULK_IREQ_FLAGS_ALL) ||
	    memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
		return -EINVAL;

	breq->startino = hdr->ino;
	breq->ubuffer = ubuffer;
	breq->icount = hdr->icount;
	breq->ocount = 0;
	breq->flags = 0;

	/*
	 * The @ino parameter is a special value, so we must look it up here.
	 * We're not allowed to have IREQ_AGNO, and we only return one inode
	 * worth of data.
	 */
	if (hdr->flags & XFS_BULK_IREQ_SPECIAL) {
		if (hdr->flags & XFS_BULK_IREQ_AGNO)
			return -EINVAL;

		switch (hdr->ino) {
		case XFS_BULK_IREQ_SPECIAL_ROOT:
			breq->startino = mp->m_sb.sb_rootino;
			break;
		default:
			return -EINVAL;
		}
		breq->icount = 1;
	}

	/*
	 * The IREQ_AGNO flag means that we only want results from a given AG.
	 * If @hdr->ino is zero, we start iterating in that AG.  If @hdr->ino is
	 * beyond the specified AG then we return no results.
	 */
	if (hdr->flags & XFS_BULK_IREQ_AGNO) {
		if (hdr->agno >= mp->m_sb.sb_agcount)
			return -EINVAL;

		if (breq->startino == 0)
			breq->startino = XFS_AGINO_TO_INO(mp, hdr->agno, 0);
		else if (XFS_INO_TO_AGNO(mp, breq->startino) < hdr->agno)
			return -EINVAL;

		/* Tell the iwalk code to stop at the end of this AG. */
		breq->iwalk_flags |= XFS_IWALK_SAME_AG;

		/* Asking for an inode past the end of the AG?  We're done! */
		if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno)
			return -ECANCELED;
	} else if (hdr->agno)
		return -EINVAL;	/* agno must be zero without IREQ_AGNO */

	/* Asking for an inode past the end of the FS?  We're done! */
	if (XFS_INO_TO_AGNO(mp, breq->startino) >= mp->m_sb.sb_agcount)
		return -ECANCELED;

	if (hdr->flags & XFS_BULK_IREQ_NREXT64)
		breq->flags |= XFS_IBULK_NREXT64;

	/* Caller wants to see metadata directories in bulkstat output. */
	if (hdr->flags & XFS_BULK_IREQ_METADIR)
		breq->flags |= XFS_IBULK_METADIR;

	return 0;
}
248 
249 /*
250  * Update the userspace bulk request @hdr to reflect the end state of the
251  * internal bulk request @breq.
252  */
253 static void
xfs_bulk_ireq_teardown(struct xfs_bulk_ireq * hdr,struct xfs_ibulk * breq)254 xfs_bulk_ireq_teardown(
255 	struct xfs_bulk_ireq	*hdr,
256 	struct xfs_ibulk	*breq)
257 {
258 	hdr->ino = breq->startino;
259 	hdr->ocount = breq->ocount;
260 }
261 
/* Handle the v5 bulkstat ioctl. */
STATIC int
xfs_ioc_bulkstat(
	struct file			*file,
	unsigned int			cmd,
	struct xfs_bulkstat_req __user	*arg)
{
	struct xfs_mount		*mp = XFS_I(file_inode(file))->i_mount;
	struct xfs_bulk_ireq		hdr;
	struct xfs_ibulk		breq = {
		.mp			= mp,
		.idmap			= file_mnt_idmap(file),
	};
	int				error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
		return -EFAULT;

	error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->bulkstat);
	if (error == -ECANCELED)
		goto out_teardown;	/* nothing to do, but still report state */
	if (error < 0)
		return error;

	error = xfs_bulkstat(&breq, xfs_bulkstat_fmt);
	if (error)
		return error;

out_teardown:
	/* Copy the final cursor/count back so userspace can continue. */
	xfs_bulk_ireq_teardown(&hdr, &breq);
	if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return 0;
}
303 
304 STATIC int
xfs_inumbers_fmt(struct xfs_ibulk * breq,const struct xfs_inumbers * igrp)305 xfs_inumbers_fmt(
306 	struct xfs_ibulk		*breq,
307 	const struct xfs_inumbers	*igrp)
308 {
309 	if (copy_to_user(breq->ubuffer, igrp, sizeof(struct xfs_inumbers)))
310 		return -EFAULT;
311 	return xfs_ibulk_advance(breq, sizeof(struct xfs_inumbers));
312 }
313 
/* Handle the v5 inumbers ioctl. */
STATIC int
xfs_ioc_inumbers(
	struct xfs_mount		*mp,
	unsigned int			cmd,
	struct xfs_inumbers_req __user	*arg)
{
	struct xfs_bulk_ireq		hdr;
	struct xfs_ibulk		breq = {
		.mp			= mp,
	};
	int				error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
		return -EFAULT;

	/* METADIR only makes sense for bulkstat, not for inumbers. */
	if (hdr.flags & XFS_BULK_IREQ_METADIR)
		return -EINVAL;

	error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->inumbers);
	if (error == -ECANCELED)
		goto out_teardown;	/* nothing to do, but still report state */
	if (error < 0)
		return error;

	error = xfs_inumbers(&breq, xfs_inumbers_fmt);
	if (error)
		return error;

out_teardown:
	/* Copy the final cursor/count back so userspace can continue. */
	xfs_bulk_ireq_teardown(&hdr, &breq);
	if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return 0;
}
356 
357 STATIC int
xfs_ioc_fsgeometry(struct xfs_mount * mp,void __user * arg,int struct_version)358 xfs_ioc_fsgeometry(
359 	struct xfs_mount	*mp,
360 	void			__user *arg,
361 	int			struct_version)
362 {
363 	struct xfs_fsop_geom	fsgeo;
364 	size_t			len;
365 
366 	xfs_fs_geometry(mp, &fsgeo, struct_version);
367 
368 	if (struct_version <= 3)
369 		len = sizeof(struct xfs_fsop_geom_v1);
370 	else if (struct_version == 4)
371 		len = sizeof(struct xfs_fsop_geom_v4);
372 	else {
373 		xfs_fsop_geom_health(mp, &fsgeo);
374 		len = sizeof(fsgeo);
375 	}
376 
377 	if (copy_to_user(arg, &fsgeo, len))
378 		return -EFAULT;
379 	return 0;
380 }
381 
382 STATIC int
xfs_ioc_ag_geometry(struct xfs_mount * mp,void __user * arg)383 xfs_ioc_ag_geometry(
384 	struct xfs_mount	*mp,
385 	void			__user *arg)
386 {
387 	struct xfs_perag	*pag;
388 	struct xfs_ag_geometry	ageo;
389 	int			error;
390 
391 	if (copy_from_user(&ageo, arg, sizeof(ageo)))
392 		return -EFAULT;
393 	if (ageo.ag_flags)
394 		return -EINVAL;
395 	if (memchr_inv(&ageo.ag_reserved, 0, sizeof(ageo.ag_reserved)))
396 		return -EINVAL;
397 
398 	pag = xfs_perag_get(mp, ageo.ag_number);
399 	if (!pag)
400 		return -EINVAL;
401 
402 	error = xfs_ag_get_geometry(pag, &ageo);
403 	xfs_perag_put(pag);
404 	if (error)
405 		return error;
406 
407 	if (copy_to_user(arg, &ageo, sizeof(ageo)))
408 		return -EFAULT;
409 	return 0;
410 }
411 
412 STATIC int
xfs_ioc_rtgroup_geometry(struct xfs_mount * mp,void __user * arg)413 xfs_ioc_rtgroup_geometry(
414 	struct xfs_mount	*mp,
415 	void			__user *arg)
416 {
417 	struct xfs_rtgroup	*rtg;
418 	struct xfs_rtgroup_geometry rgeo;
419 	xfs_rgblock_t		highest_rgbno;
420 	int			error;
421 
422 	if (copy_from_user(&rgeo, arg, sizeof(rgeo)))
423 		return -EFAULT;
424 	if (rgeo.rg_flags)
425 		return -EINVAL;
426 	if (memchr_inv(&rgeo.rg_reserved, 0, sizeof(rgeo.rg_reserved)))
427 		return -EINVAL;
428 	if (!xfs_has_rtgroups(mp))
429 		return -EINVAL;
430 
431 	rtg = xfs_rtgroup_get(mp, rgeo.rg_number);
432 	if (!rtg)
433 		return -EINVAL;
434 
435 	error = xfs_rtgroup_get_geometry(rtg, &rgeo);
436 	xfs_rtgroup_put(rtg);
437 	if (error)
438 		return error;
439 
440 	if (xfs_has_zoned(mp)) {
441 		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
442 		if (rtg->rtg_open_zone) {
443 			rgeo.rg_writepointer = rtg->rtg_open_zone->oz_allocated;
444 		} else {
445 			highest_rgbno = xfs_rtrmap_highest_rgbno(rtg);
446 			if (highest_rgbno == NULLRGBLOCK)
447 				rgeo.rg_writepointer = 0;
448 			else
449 				rgeo.rg_writepointer = highest_rgbno + 1;
450 		}
451 		xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
452 		rgeo.rg_flags |= XFS_RTGROUP_GEOM_WRITEPOINTER;
453 	}
454 
455 	if (copy_to_user(arg, &rgeo, sizeof(rgeo)))
456 		return -EFAULT;
457 	return 0;
458 }
459 
460 /*
461  * Linux extended inode flags interface.
462  */
463 
/*
 * Populate a file_kattr with the inode's extended attributes for the
 * FSGETXATTR family.  @whichfork selects whether fsx_nextents counts the
 * data or attr fork.  Both callers in this file hold XFS_ILOCK_SHARED
 * around this call.
 */
static void
xfs_fill_fsxattr(
	struct xfs_inode	*ip,
	int			whichfork,
	struct file_kattr	*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	fileattr_fill_xflags(fa, xfs_ip2xflags(ip));

	if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) {
		fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
	} else if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
		/*
		 * Don't let a misaligned extent size hint on a directory
		 * escape to userspace if it won't pass the setattr checks
		 * later.
		 */
		if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_extlen_to_rtxmod(mp, ip->i_extsize) > 0) {
			fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE |
					    FS_XFLAG_EXTSZINHERIT);
			fa->fsx_extsize = 0;
		} else {
			fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
		}
	}

	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
		/*
		 * Don't let a misaligned CoW extent size hint on a directory
		 * escape to userspace if it won't pass the setattr checks
		 * later.
		 */
		if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    ip->i_cowextsize % mp->m_sb.sb_rextsize > 0) {
			fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;
			fa->fsx_cowextsize = 0;
		} else {
			fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize);
		}
	}

	fa->fsx_projid = ip->i_projid;
	/* Use the exact in-core extent count if the extents are loaded. */
	if (ifp && !xfs_need_iread_extents(ifp))
		fa->fsx_nextents = xfs_iext_count(ifp);
	else
		fa->fsx_nextents = xfs_ifork_nextents(ifp);
}
514 
515 STATIC int
xfs_ioc_fsgetxattra(xfs_inode_t * ip,void __user * arg)516 xfs_ioc_fsgetxattra(
517 	xfs_inode_t		*ip,
518 	void			__user *arg)
519 {
520 	struct file_kattr	fa;
521 
522 	xfs_ilock(ip, XFS_ILOCK_SHARED);
523 	xfs_fill_fsxattr(ip, XFS_ATTR_FORK, &fa);
524 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
525 
526 	return copy_fsxattr_to_user(&fa, arg);
527 }
528 
529 int
xfs_fileattr_get(struct dentry * dentry,struct file_kattr * fa)530 xfs_fileattr_get(
531 	struct dentry		*dentry,
532 	struct file_kattr	*fa)
533 {
534 	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
535 
536 	xfs_ilock(ip, XFS_ILOCK_SHARED);
537 	xfs_fill_fsxattr(ip, XFS_DATA_FORK, fa);
538 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
539 
540 	return 0;
541 }
542 
/*
 * Apply the FS_XFLAG_* bits in @fa to the inode's on-disk flag words,
 * rejecting combinations that cannot be represented on this filesystem.
 * On success the inode core is timestamped and logged in @tp.
 */
static int
xfs_ioctl_setattr_xflags(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct file_kattr	*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			rtflag = (fa->fsx_xflags & FS_XFLAG_REALTIME);
	uint64_t		i_flags2;

	if (rtflag != XFS_IS_REALTIME_INODE(ip)) {
		/* Can't change realtime flag if any extents are allocated. */
		if (xfs_inode_has_filedata(ip))
			return -EINVAL;

		/*
		 * If S_DAX is enabled on this file, we can only switch the
		 * device if both support fsdax.  We can't update S_DAX because
		 * there might be other threads walking down the access paths.
		 */
		if (IS_DAX(VFS_I(ip)) &&
		    (mp->m_ddev_targp->bt_daxdev == NULL ||
		     (mp->m_rtdev_targp &&
		      mp->m_rtdev_targp->bt_daxdev == NULL)))
			return -EINVAL;
	}

	if (rtflag) {
		/* If realtime flag is set then must have realtime device */
		if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
		    xfs_extlen_to_rtxmod(mp, ip->i_extsize))
			return -EINVAL;
	}

	/* diflags2 only valid for v3 inodes. */
	i_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
	if (i_flags2 && !xfs_has_v3inodes(mp))
		return -EINVAL;

	/* All checks passed: commit the new flag words to the inode core. */
	ip->i_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
	ip->i_diflags2 = i_flags2;

	xfs_diflags_to_iflags(ip, false);

	/*
	 * Make the stable writes flag match that of the device the inode
	 * resides on when flipping the RT flag.
	 */
	if (rtflag != XFS_IS_REALTIME_INODE(ip) && S_ISREG(VFS_I(ip)->i_mode))
		xfs_update_stable_writes(ip);

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	XFS_STATS_INC(mp, xs_ig_attrchg);
	return 0;
}
599 
600 static void
xfs_ioctl_setattr_prepare_dax(struct xfs_inode * ip,struct file_kattr * fa)601 xfs_ioctl_setattr_prepare_dax(
602 	struct xfs_inode	*ip,
603 	struct file_kattr	*fa)
604 {
605 	struct xfs_mount	*mp = ip->i_mount;
606 	struct inode            *inode = VFS_I(ip);
607 
608 	if (S_ISDIR(inode->i_mode))
609 		return;
610 
611 	if (xfs_has_dax_always(mp) || xfs_has_dax_never(mp))
612 		return;
613 
614 	if (((fa->fsx_xflags & FS_XFLAG_DAX) &&
615 	    !(ip->i_diflags2 & XFS_DIFLAG2_DAX)) ||
616 	    (!(fa->fsx_xflags & FS_XFLAG_DAX) &&
617 	     (ip->i_diflags2 & XFS_DIFLAG2_DAX)))
618 		d_mark_dontcache(inode);
619 }
620 
621 /*
622  * Set up the transaction structure for the setattr operation, checking that we
623  * have permission to do so. On success, return a clean transaction and the
624  * inode locked exclusively ready for further operation specific checks. On
625  * failure, return an error without modifying or locking the inode.
626  */
627 static struct xfs_trans *
xfs_ioctl_setattr_get_trans(struct xfs_inode * ip,struct xfs_dquot * pdqp)628 xfs_ioctl_setattr_get_trans(
629 	struct xfs_inode	*ip,
630 	struct xfs_dquot	*pdqp)
631 {
632 	struct xfs_mount	*mp = ip->i_mount;
633 	struct xfs_trans	*tp;
634 	int			error = -EROFS;
635 
636 	if (xfs_is_readonly(mp))
637 		goto out_error;
638 	error = -EIO;
639 	if (xfs_is_shutdown(mp))
640 		goto out_error;
641 
642 	error = xfs_trans_alloc_ichange(ip, NULL, NULL, pdqp,
643 			has_capability_noaudit(current, CAP_FOWNER), &tp);
644 	if (error)
645 		goto out_error;
646 
647 	if (xfs_has_wsync(mp))
648 		xfs_trans_set_sync(tp);
649 
650 	return tp;
651 
652 out_error:
653 	return ERR_PTR(error);
654 }
655 
/*
 * Validate a proposed extent size hint.  For regular files, the hint can only
 * be changed if no extents are allocated.
 */
static int
xfs_ioctl_setattr_check_extsize(
	struct xfs_inode	*ip,
	struct file_kattr	*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_failaddr_t		failaddr;
	uint16_t		new_diflags;

	/* fsx_valid is only set on the FSSETXATTR path; SETFLAGS skips this. */
	if (!fa->fsx_valid)
		return 0;

	/* Regular files with data cannot change their hint. */
	if (S_ISREG(VFS_I(ip)->i_mode) && xfs_inode_has_filedata(ip) &&
	    XFS_FSB_TO_B(mp, ip->i_extsize) != fa->fsx_extsize)
		return -EINVAL;

	/* The hint must be a whole number of filesystem blocks. */
	if (fa->fsx_extsize & mp->m_blockmask)
		return -EINVAL;

	new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);

	/*
	 * Inode verifiers do not check that the extent size hint is an integer
	 * multiple of the rt extent size on a directory with both rtinherit
	 * and extszinherit flags set.  Don't let sysadmins misconfigure
	 * directories.
	 */
	if ((new_diflags & XFS_DIFLAG_RTINHERIT) &&
	    (new_diflags & XFS_DIFLAG_EXTSZINHERIT)) {
		unsigned int	rtextsize_bytes;

		rtextsize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
		if (fa->fsx_extsize % rtextsize_bytes)
			return -EINVAL;
	}

	/* Let the shared verifier apply the remaining mode/flag rules. */
	failaddr = xfs_inode_validate_extsize(ip->i_mount,
			XFS_B_TO_FSB(mp, fa->fsx_extsize),
			VFS_I(ip)->i_mode, new_diflags);
	return failaddr != NULL ? -EINVAL : 0;
}
701 
702 static int
xfs_ioctl_setattr_check_cowextsize(struct xfs_inode * ip,struct file_kattr * fa)703 xfs_ioctl_setattr_check_cowextsize(
704 	struct xfs_inode	*ip,
705 	struct file_kattr	*fa)
706 {
707 	struct xfs_mount	*mp = ip->i_mount;
708 	xfs_failaddr_t		failaddr;
709 	uint64_t		new_diflags2;
710 	uint16_t		new_diflags;
711 
712 	if (!fa->fsx_valid)
713 		return 0;
714 
715 	if (fa->fsx_cowextsize & mp->m_blockmask)
716 		return -EINVAL;
717 
718 	new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
719 	new_diflags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
720 
721 	failaddr = xfs_inode_validate_cowextsize(ip->i_mount,
722 			XFS_B_TO_FSB(mp, fa->fsx_cowextsize),
723 			VFS_I(ip)->i_mode, new_diflags, new_diflags2);
724 	return failaddr != NULL ? -EINVAL : 0;
725 }
726 
727 static int
xfs_ioctl_setattr_check_projid(struct xfs_inode * ip,struct file_kattr * fa)728 xfs_ioctl_setattr_check_projid(
729 	struct xfs_inode	*ip,
730 	struct file_kattr	*fa)
731 {
732 	if (!fa->fsx_valid)
733 		return 0;
734 
735 	/* Disallow 32bit project ids if 32bit IDs are not enabled. */
736 	if (fa->fsx_projid > (uint16_t)-1 &&
737 	    !xfs_has_projid32(ip->i_mount))
738 		return -EINVAL;
739 	return 0;
740 }
741 
/*
 * VFS ->fileattr_set implementation, serving both the FS_IOC_SETFLAGS and
 * FS_IOC_FSSETXATTR paths (fa->fsx_valid distinguishes the latter).
 * Validates the request, allocates any needed project dquot, then applies
 * flags, project id and extent size hints in a single transaction.
 */
int
xfs_fileattr_set(
	struct mnt_idmap	*idmap,
	struct dentry		*dentry,
	struct file_kattr	*fa)
{
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_dquot	*olddquot = NULL;
	int			error;

	trace_xfs_ioctl_setattr(ip);

	/* On the SETFLAGS path, only this subset of FS_*_FL is supported. */
	if (!fa->fsx_valid) {
		if (fa->flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL |
				  FS_NOATIME_FL | FS_NODUMP_FL |
				  FS_SYNC_FL | FS_DAX_FL | FS_PROJINHERIT_FL))
			return -EOPNOTSUPP;
	}

	error = xfs_ioctl_setattr_check_projid(ip, fa);
	if (error)
		return error;

	/*
	 * If disk quotas is on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (fa->fsx_valid && XFS_IS_QUOTA_ON(mp)) {
		error = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
				VFS_I(ip)->i_gid, fa->fsx_projid,
				XFS_QMOPT_PQUOTA, NULL, NULL, &pdqp);
		if (error)
			return error;
	}

	xfs_ioctl_setattr_prepare_dax(ip, fa);

	tp = xfs_ioctl_setattr_get_trans(ip, pdqp);
	if (IS_ERR(tp)) {
		error = PTR_ERR(tp);
		goto error_free_dquots;
	}

	/* Validate hints before touching the inode core. */
	error = xfs_ioctl_setattr_check_extsize(ip, fa);
	if (error)
		goto error_trans_cancel;

	error = xfs_ioctl_setattr_check_cowextsize(ip, fa);
	if (error)
		goto error_trans_cancel;

	error = xfs_ioctl_setattr_xflags(tp, ip, fa);
	if (error)
		goto error_trans_cancel;

	/* SETFLAGS carries no projid/extsize payload; commit flags only. */
	if (!fa->fsx_valid)
		goto skip_xattr;
	/*
	 * Change file ownership.  Must be the owner or privileged.  CAP_FSETID
	 * overrides the following restrictions:
	 *
	 * The set-user-ID and set-group-ID bits of a file will be cleared upon
	 * successful return from chown()
	 */

	if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
	    !capable_wrt_inode_uidgid(idmap, VFS_I(ip), CAP_FSETID))
		VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);

	/* Change the ownerships and register project quota modifications */
	if (ip->i_projid != fa->fsx_projid) {
		if (XFS_IS_PQUOTA_ON(mp)) {
			/* chown returns the dquot we must release below. */
			olddquot = xfs_qm_vop_chown(tp, ip,
						&ip->i_pdquot, pdqp);
		}
		ip->i_projid = fa->fsx_projid;
	}

	/*
	 * Only set the extent size hint if we've already determined that the
	 * extent size hint should be set on the inode. If no extent size flags
	 * are set on the inode then unconditionally clear the extent size hint.
	 */
	if (ip->i_diflags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
		ip->i_extsize = XFS_B_TO_FSB(mp, fa->fsx_extsize);
	else
		ip->i_extsize = 0;

	if (xfs_has_v3inodes(mp)) {
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			ip->i_cowextsize = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
		else
			ip->i_cowextsize = 0;
	}

skip_xattr:
	error = xfs_trans_commit(tp);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot);
	xfs_qm_dqrele(pdqp);

	return error;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_free_dquots:
	xfs_qm_dqrele(pdqp);
	return error;
}
861 
/*
 * Copy one bmap extent record out to userspace.  @recsize selects the
 * output layout: a plain struct getbmap (a strict prefix of getbmapx) or
 * the full struct getbmapx.  Returns false if any userspace write faults.
 */
static bool
xfs_getbmap_format(
	struct kgetbmap		*p,
	struct getbmapx __user	*u,
	size_t			recsize)
{
	/* Fields shared by both layouts; count/entries are output-only zero. */
	if (put_user(p->bmv_offset, &u->bmv_offset) ||
	    put_user(p->bmv_block, &u->bmv_block) ||
	    put_user(p->bmv_length, &u->bmv_length) ||
	    put_user(0, &u->bmv_count) ||
	    put_user(0, &u->bmv_entries))
		return false;
	/* Caller asked for the short getbmap record only; we're done. */
	if (recsize < sizeof(struct getbmapx))
		return true;
	/* Remaining fields exist only in the extended getbmapx layout. */
	if (put_user(0, &u->bmv_iflags) ||
	    put_user(p->bmv_oflags, &u->bmv_oflags) ||
	    put_user(0, &u->bmv_unused1) ||
	    put_user(0, &u->bmv_unused2))
		return false;
	return true;
}
883 
/*
 * Service the GETBMAP/GETBMAPA/GETBMAPX ioctls: gather the file's extent
 * map into a kernel buffer, then format the header plus one record per
 * extent out to userspace in the layout the ioctl variant expects.
 */
STATIC int
xfs_ioc_getbmap(
	struct file		*file,
	unsigned int		cmd,
	void			__user *arg)
{
	struct getbmapx		bmx = { 0 };
	struct kgetbmap		*buf;
	size_t			recsize;
	int			error, i;

	switch (cmd) {
	case XFS_IOC_GETBMAPA:
		bmx.bmv_iflags = BMV_IF_ATTRFORK;
		fallthrough;
	case XFS_IOC_GETBMAP:
		/* struct getbmap is a strict subset of struct getbmapx. */
		recsize = sizeof(struct getbmap);
		break;
	case XFS_IOC_GETBMAPX:
		recsize = sizeof(struct getbmapx);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(&bmx, arg, recsize))
		return -EFAULT;

	/* Need at least one header slot plus one record. */
	if (bmx.bmv_count < 2)
		return -EINVAL;
	/* Bound the allocation so bmv_count * recsize cannot overflow. */
	if (bmx.bmv_count >= INT_MAX / recsize)
		return -ENOMEM;

	buf = kvzalloc_objs(*buf, bmx.bmv_count);
	if (!buf)
		return -ENOMEM;

	error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, buf);
	if (error)
		goto out_free_buf;

	/* First copy back the (updated) header, then one record at a time. */
	error = -EFAULT;
	if (copy_to_user(arg, &bmx, recsize))
		goto out_free_buf;
	arg += recsize;

	for (i = 0; i < bmx.bmv_entries; i++) {
		if (!xfs_getbmap_format(buf + i, arg, recsize))
			goto out_free_buf;
		arg += recsize;
	}

	error = 0;
out_free_buf:
	kvfree(buf);
	return error;
}
942 
/*
 * XFS_IOC_SWAPEXT: swap the extent maps of the two files named by fd in
 * @sxp after validating that both descriptors are read/write, non-append
 * XFS regular files on the same mount.  The CLASS(fd, ...) guards drop
 * the fd references automatically when they go out of scope.
 */
int
xfs_ioc_swapext(
	xfs_swapext_t	*sxp)
{
	xfs_inode_t     *ip, *tip;

	/* Pull information for the target fd */
	CLASS(fd, f)((int)sxp->sx_fdtarget);
	if (fd_empty(f))
		return -EINVAL;

	if (!(fd_file(f)->f_mode & FMODE_WRITE) ||
	    !(fd_file(f)->f_mode & FMODE_READ) ||
	    (fd_file(f)->f_flags & O_APPEND))
		return -EBADF;

	CLASS(fd, tmp)((int)sxp->sx_fdtmp);
	if (fd_empty(tmp))
		return -EINVAL;

	if (!(fd_file(tmp)->f_mode & FMODE_WRITE) ||
	    !(fd_file(tmp)->f_mode & FMODE_READ) ||
	    (fd_file(tmp)->f_flags & O_APPEND))
		return -EBADF;

	/* Refuse to operate on active swap files. */
	if (IS_SWAPFILE(file_inode(fd_file(f))) ||
	    IS_SWAPFILE(file_inode(fd_file(tmp))))
		return -EINVAL;

	/*
	 * We need to ensure that the fds passed in point to XFS inodes
	 * before we cast and access them as XFS structures as we have no
	 * control over what the user passes us here.
	 */
	if (fd_file(f)->f_op != &xfs_file_operations ||
	    fd_file(tmp)->f_op != &xfs_file_operations)
		return -EINVAL;

	ip = XFS_I(file_inode(fd_file(f)));
	tip = XFS_I(file_inode(fd_file(tmp)));

	/* Both files must live on the same mount, and differ. */
	if (ip->i_mount != tip->i_mount)
		return -EINVAL;

	if (ip->i_ino == tip->i_ino)
		return -EINVAL;

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	return xfs_swap_extents(ip, tip, sxp);
}
995 
996 static int
xfs_ioc_getlabel(struct xfs_mount * mp,char __user * user_label)997 xfs_ioc_getlabel(
998 	struct xfs_mount	*mp,
999 	char			__user *user_label)
1000 {
1001 	struct xfs_sb		*sbp = &mp->m_sb;
1002 	char			label[XFSLABEL_MAX + 1];
1003 
1004 	/* Paranoia */
1005 	BUILD_BUG_ON(sizeof(sbp->sb_fname) > FSLABEL_MAX);
1006 
1007 	/* 1 larger than sb_fname, so this ensures a trailing NUL char */
1008 	spin_lock(&mp->m_sb_lock);
1009 	memtostr_pad(label, sbp->sb_fname);
1010 	spin_unlock(&mp->m_sb_lock);
1011 
1012 	if (copy_to_user(user_label, label, sizeof(label)))
1013 		return -EFAULT;
1014 	return 0;
1015 }
1016 
/*
 * FS_IOC_SETFSLABEL: set the filesystem label in the primary superblock,
 * push it to all secondary superblocks, and invalidate the block device
 * page cache so tools like blkid see the new label.  Requires
 * CAP_SYS_ADMIN and a writable mount.
 */
static int
xfs_ioc_setlabel(
	struct file		*filp,
	struct xfs_mount	*mp,
	char			__user *newlabel)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	char			label[XFSLABEL_MAX + 1];
	size_t			len;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	/*
	 * The generic ioctl allows up to FSLABEL_MAX chars, but XFS is much
	 * smaller, at 12 bytes.  We copy one more to be sure we find the
	 * (required) NULL character to test the incoming label length.
	 * NB: The on disk label doesn't need to be null terminated.
	 */
	if (copy_from_user(label, newlabel, XFSLABEL_MAX + 1))
		return -EFAULT;
	len = strnlen(label, XFSLABEL_MAX + 1);
	if (len > sizeof(sbp->sb_fname))
		return -EINVAL;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	/* Update the in-core superblock label under the sb lock. */
	spin_lock(&mp->m_sb_lock);
	memset(sbp->sb_fname, 0, sizeof(sbp->sb_fname));
	memcpy(sbp->sb_fname, label, len);
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Now we do several things to satisfy userspace.
	 * In addition to normal logging of the primary superblock, we also
	 * immediately write these changes to sector zero for the primary, then
	 * update all backup supers (as xfs_db does for a label change), then
	 * invalidate the block device page cache.  This is so that any prior
	 * buffered reads from userspace (i.e. from blkid) are invalidated,
	 * and userspace will see the newly-written label.
	 */
	error = xfs_sync_sb_buf(mp, true);
	if (error)
		goto out;
	/*
	 * growfs also updates backup supers so lock against that.
	 */
	mutex_lock(&mp->m_growlock);
	error = xfs_update_secondary_sbs(mp);
	mutex_unlock(&mp->m_growlock);

	invalidate_bdev(mp->m_ddev_targp->bt_bdev);
	if (xfs_has_rtsb(mp) && mp->m_rtdev_targp)
		invalidate_bdev(mp->m_rtdev_targp->bt_bdev);

out:
	mnt_drop_write_file(filp);
	return error;
}
1078 
1079 static inline int
xfs_fs_eofblocks_from_user(struct xfs_fs_eofblocks * src,struct xfs_icwalk * dst)1080 xfs_fs_eofblocks_from_user(
1081 	struct xfs_fs_eofblocks		*src,
1082 	struct xfs_icwalk		*dst)
1083 {
1084 	if (src->eof_version != XFS_EOFBLOCKS_VERSION)
1085 		return -EINVAL;
1086 
1087 	if (src->eof_flags & ~XFS_EOF_FLAGS_VALID)
1088 		return -EINVAL;
1089 
1090 	if (memchr_inv(&src->pad32, 0, sizeof(src->pad32)) ||
1091 	    memchr_inv(src->pad64, 0, sizeof(src->pad64)))
1092 		return -EINVAL;
1093 
1094 	dst->icw_flags = 0;
1095 	if (src->eof_flags & XFS_EOF_FLAGS_SYNC)
1096 		dst->icw_flags |= XFS_ICWALK_FLAG_SYNC;
1097 	if (src->eof_flags & XFS_EOF_FLAGS_UID)
1098 		dst->icw_flags |= XFS_ICWALK_FLAG_UID;
1099 	if (src->eof_flags & XFS_EOF_FLAGS_GID)
1100 		dst->icw_flags |= XFS_ICWALK_FLAG_GID;
1101 	if (src->eof_flags & XFS_EOF_FLAGS_PRID)
1102 		dst->icw_flags |= XFS_ICWALK_FLAG_PRID;
1103 	if (src->eof_flags & XFS_EOF_FLAGS_MINFILESIZE)
1104 		dst->icw_flags |= XFS_ICWALK_FLAG_MINFILESIZE;
1105 
1106 	dst->icw_prid = src->eof_prid;
1107 	dst->icw_min_file_size = src->eof_min_file_size;
1108 
1109 	dst->icw_uid = INVALID_UID;
1110 	if (src->eof_flags & XFS_EOF_FLAGS_UID) {
1111 		dst->icw_uid = make_kuid(current_user_ns(), src->eof_uid);
1112 		if (!uid_valid(dst->icw_uid))
1113 			return -EINVAL;
1114 	}
1115 
1116 	dst->icw_gid = INVALID_GID;
1117 	if (src->eof_flags & XFS_EOF_FLAGS_GID) {
1118 		dst->icw_gid = make_kgid(current_user_ns(), src->eof_gid);
1119 		if (!gid_valid(dst->icw_gid))
1120 			return -EINVAL;
1121 	}
1122 	return 0;
1123 }
1124 
1125 static int
xfs_ioctl_getset_resblocks(struct file * filp,unsigned int cmd,void __user * arg)1126 xfs_ioctl_getset_resblocks(
1127 	struct file		*filp,
1128 	unsigned int		cmd,
1129 	void __user		*arg)
1130 {
1131 	struct xfs_mount	*mp = XFS_I(file_inode(filp))->i_mount;
1132 	struct xfs_fsop_resblks	fsop = { };
1133 	int			error;
1134 
1135 	if (!capable(CAP_SYS_ADMIN))
1136 		return -EPERM;
1137 
1138 	if (cmd == XFS_IOC_SET_RESBLKS) {
1139 		if (xfs_is_readonly(mp))
1140 			return -EROFS;
1141 
1142 		if (copy_from_user(&fsop, arg, sizeof(fsop)))
1143 			return -EFAULT;
1144 
1145 		error = mnt_want_write_file(filp);
1146 		if (error)
1147 			return error;
1148 		error = xfs_reserve_blocks(mp, XC_FREE_BLOCKS, fsop.resblks);
1149 		mnt_drop_write_file(filp);
1150 		if (error)
1151 			return error;
1152 	}
1153 
1154 	spin_lock(&mp->m_sb_lock);
1155 	fsop.resblks = mp->m_free[XC_FREE_BLOCKS].res_total;
1156 	fsop.resblks_avail = mp->m_free[XC_FREE_BLOCKS].res_avail;
1157 	spin_unlock(&mp->m_sb_lock);
1158 
1159 	if (copy_to_user(arg, &fsop, sizeof(fsop)))
1160 		return -EFAULT;
1161 	return 0;
1162 }
1163 
1164 static int
xfs_ioctl_fs_counts(struct xfs_mount * mp,struct xfs_fsop_counts __user * uarg)1165 xfs_ioctl_fs_counts(
1166 	struct xfs_mount	*mp,
1167 	struct xfs_fsop_counts __user	*uarg)
1168 {
1169 	struct xfs_fsop_counts	out = {
1170 		.allocino = percpu_counter_read_positive(&mp->m_icount),
1171 		.freeino  = percpu_counter_read_positive(&mp->m_ifree),
1172 		.freedata = xfs_estimate_freecounter(mp, XC_FREE_BLOCKS) -
1173 				xfs_freecounter_unavailable(mp, XC_FREE_BLOCKS),
1174 		.freertx  = xfs_estimate_freecounter(mp, XC_FREE_RTEXTENTS),
1175 	};
1176 
1177 	if (copy_to_user(uarg, &out, sizeof(out)))
1178 		return -EFAULT;
1179 	return 0;
1180 }
1181 
1182 /*
1183  * These long-unused ioctls were removed from the official ioctl API in 5.17,
1184  * but retain these definitions so that we can log warnings about them.
1185  */
1186 #define XFS_IOC_ALLOCSP		_IOW ('X', 10, struct xfs_flock64)
1187 #define XFS_IOC_FREESP		_IOW ('X', 11, struct xfs_flock64)
1188 #define XFS_IOC_ALLOCSP64	_IOW ('X', 36, struct xfs_flock64)
1189 #define XFS_IOC_FREESP64	_IOW ('X', 37, struct xfs_flock64)
1190 
1191 /*
1192  * Note: some of the ioctl's return positive numbers as a
1193  * byte count indicating success, such as readlink_by_handle.
1194  * So we don't "sign flip" like most other routines.  This means
1195  * true errors need to be returned as a negative value.
1196  */
long
xfs_file_ioctl(
	struct file		*filp,
	unsigned int		cmd,
	unsigned long		p)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;
	int			error;

	trace_xfs_file_ioctl(ip);

	switch (cmd) {
	case FITRIM:
		return xfs_ioc_trim(mp, arg);
	case FS_IOC_GETFSLABEL:
		return xfs_ioc_getlabel(mp, arg);
	case FS_IOC_SETFSLABEL:
		return xfs_ioc_setlabel(filp, mp, arg);
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
		/* Removed from the official API; warn once and reject. */
		xfs_warn_once(mp,
	"%s should use fallocate; XFS_IOC_{ALLOC,FREE}SP ioctl unsupported",
				current->comm);
		return -ENOTTY;
	case XFS_IOC_DIOINFO: {
		struct kstat		st;
		struct dioattr		da;

		/* Direct I/O alignment info comes from statx(STATX_DIOALIGN). */
		error = vfs_getattr(&filp->f_path, &st, STATX_DIOALIGN, 0);
		if (error)
			return error;

		/*
		 * Some userspace directly feeds the return value to
		 * posix_memalign, which fails for values that are smaller than
		 * the pointer size.  Round up the value to not break userspace.
		 */
		da.d_mem = roundup(st.dio_mem_align, sizeof(void *));
		da.d_miniosz = st.dio_offset_align;
		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
		if (copy_to_user(arg, &da, sizeof(da)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_FSBULKSTAT_SINGLE:
	case XFS_IOC_FSBULKSTAT:
	case XFS_IOC_FSINUMBERS:
		return xfs_ioc_fsbulkstat(filp, cmd, arg);

	case XFS_IOC_BULKSTAT:
		return xfs_ioc_bulkstat(filp, cmd, arg);
	case XFS_IOC_INUMBERS:
		return xfs_ioc_inumbers(mp, cmd, arg);

	/* Third argument selects the geometry structure version to emit. */
	case XFS_IOC_FSGEOMETRY_V1:
		return xfs_ioc_fsgeometry(mp, arg, 3);
	case XFS_IOC_FSGEOMETRY_V4:
		return xfs_ioc_fsgeometry(mp, arg, 4);
	case XFS_IOC_FSGEOMETRY:
		return xfs_ioc_fsgeometry(mp, arg, 5);

	case XFS_IOC_AG_GEOMETRY:
		return xfs_ioc_ag_geometry(mp, arg);
	case XFS_IOC_RTGROUP_GEOMETRY:
		return xfs_ioc_rtgroup_geometry(mp, arg);

	case XFS_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *)arg);

	case XFS_IOC_FSGETXATTRA:
		return xfs_ioc_fsgetxattra(ip, arg);
	case XFS_IOC_GETPARENTS:
		return xfs_ioc_getparents(filp, arg);
	case XFS_IOC_GETPARENTS_BY_HANDLE:
		return xfs_ioc_getparents_by_handle(filp, arg);
	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
	case XFS_IOC_GETBMAPX:
		return xfs_ioc_getbmap(filp, cmd, arg);

	case FS_IOC_GETFSMAP:
		return xfs_ioc_getfsmap(ip, arg);

	case XFS_IOC_SCRUBV_METADATA:
		return xfs_ioc_scrubv_metadata(filp, arg);
	case XFS_IOC_SCRUB_METADATA:
		return xfs_ioc_scrub_metadata(filp, arg);

	case XFS_IOC_FD_TO_HANDLE:
	case XFS_IOC_PATH_TO_HANDLE:
	case XFS_IOC_PATH_TO_FSHANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(hreq)))
			return -EFAULT;
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		/* NB: may return a positive fd number, not just 0/-errno. */
		return xfs_open_by_handle(filp, &hreq);
	}

	case XFS_IOC_READLINK_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		/* NB: returns the link length (positive) on success. */
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE:
		return xfs_attrlist_by_handle(filp, arg);

	case XFS_IOC_ATTRMULTI_BY_HANDLE:
		return xfs_attrmulti_by_handle(filp, arg);

	case XFS_IOC_SWAPEXT: {
		struct xfs_swapext	sxp;

		if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_ioc_swapext(&sxp);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSCOUNTS:
		return xfs_ioctl_fs_counts(mp, arg);

	case XFS_IOC_SET_RESBLKS:
	case XFS_IOC_GET_RESBLKS:
		return xfs_ioctl_getset_resblocks(filp, cmd, arg);

	/* The growfs variants all require write access to the mount. */
	case XFS_IOC_FSGROWFSDATA: {
		struct xfs_growfs_data in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_data(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSLOG: {
		struct xfs_growfs_log in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_log(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSRT: {
		xfs_growfs_rt_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_rt(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GOINGDOWN: {
		uint32_t in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (get_user(in, (uint32_t __user *)arg))
			return -EFAULT;

		return xfs_fs_goingdown(mp, in);
	}

	case XFS_IOC_ERROR_INJECTION: {
		xfs_error_injection_t in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		return xfs_errortag_add(mp, in.errtag);
	}

	case XFS_IOC_ERROR_CLEARALL:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return xfs_errortag_clearall(mp);

	case XFS_IOC_FREE_EOFBLOCKS: {
		struct xfs_fs_eofblocks	eofb;
		struct xfs_icwalk	icw;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (xfs_is_readonly(mp))
			return -EROFS;

		if (copy_from_user(&eofb, arg, sizeof(eofb)))
			return -EFAULT;

		error = xfs_fs_eofblocks_from_user(&eofb, &icw);
		if (error)
			return error;

		trace_xfs_ioc_free_eofblocks(mp, &icw, _RET_IP_);

		/* Hold s_umount for write across the blockgc walk. */
		guard(super_write)(mp->m_super);
		return xfs_blockgc_free_space(mp, &icw);
	}

	case XFS_IOC_EXCHANGE_RANGE:
		return xfs_ioc_exchange_range(filp, arg);
	case XFS_IOC_START_COMMIT:
		return xfs_ioc_start_commit(filp, arg);
	case XFS_IOC_COMMIT_RANGE:
		return xfs_ioc_commit_range(filp, arg);

	case XFS_IOC_HEALTH_MONITOR:
		return xfs_ioc_health_monitor(filp, arg);
	case XFS_IOC_VERIFY_MEDIA:
		return xfs_ioc_verify_media(filp, arg);

	default:
		return -ENOTTY;
	}
}
1452