xref: /linux/fs/quota/dquot.c (revision 5414f3fd54b3a3f7f63f3edd276fb55281ecbe3b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Implementation of the diskquota system for the LINUX operating system. QUOTA
4  * is implemented using the BSD system call interface as the means of
5  * communication with the user level. This file contains the generic routines
6  * called by the different filesystems on allocation of an inode or block.
7  * These routines take care of the administration needed to have a consistent
8  * diskquota tracking system. The ideas of both user and group quotas are based
9  * on the Melbourne quota system as used on BSD derived systems. The internal
10  * implementation is based on one of the several variants of the LINUX
11  * inode-subsystem with added complexity of the diskquota system.
12  *
13  * Author:	Marco van Wieringen <mvw@planets.elm.net>
14  *
15  * Fixes:   Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
16  *
17  *		Revised list management to avoid races
18  *		-- Bill Hawes, <whawes@star.net>, 9/98
19  *
20  *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
21  *		As the consequence the locking was moved from dquot_decr_...(),
22  *		dquot_incr_...() to calling functions.
23  *		invalidate_dquots() now writes modified dquots.
24  *		Serialized quota_off() and quota_on() for mount point.
25  *		Fixed a few bugs in grow_dquots().
26  *		Fixed deadlock in write_dquot() - we no longer account quotas on
27  *		quota files
28  *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
29  *		add_dquot_ref() restarts after blocking
30  *		Added check for bogus uid and fixed check for group in quotactl.
31  *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
32  *
33  *		Used struct list_head instead of own list struct
34  *		Invalidation of referenced dquots is no longer possible
35  *		Improved free_dquots list management
36  *		Quota and i_blocks are now updated in one place to avoid races
37  *		Warnings are now delayed so we won't block in critical section
38  *		Write updated not to require dquot lock
39  *		Jan Kara, <jack@suse.cz>, 9/2000
40  *
41  *		Added dynamic quota structure allocation
42  *		Jan Kara <jack@suse.cz> 12/2000
43  *
44  *		Rewritten quota interface. Implemented new quota format and
45  *		formats registering.
46  *		Jan Kara, <jack@suse.cz>, 2001,2002
47  *
48  *		New SMP locking.
49  *		Jan Kara, <jack@suse.cz>, 10/2002
50  *
51  *		Added journalled quota support, fix lock inversion problems
52  *		Jan Kara, <jack@suse.cz>, 2003,2004
53  *
54  * (C) Copyright 1994 - 1997 Marco van Wieringen
55  */
56 
57 #include <linux/errno.h>
58 #include <linux/kernel.h>
59 #include <linux/fs.h>
60 #include <linux/mount.h>
61 #include <linux/mm.h>
62 #include <linux/time.h>
63 #include <linux/types.h>
64 #include <linux/string.h>
65 #include <linux/fcntl.h>
66 #include <linux/stat.h>
67 #include <linux/tty.h>
68 #include <linux/file.h>
69 #include <linux/slab.h>
70 #include <linux/sysctl.h>
71 #include <linux/init.h>
72 #include <linux/module.h>
73 #include <linux/proc_fs.h>
74 #include <linux/security.h>
75 #include <linux/sched.h>
76 #include <linux/cred.h>
77 #include <linux/kmod.h>
78 #include <linux/namei.h>
79 #include <linux/capability.h>
80 #include <linux/quotaops.h>
81 #include <linux/blkdev.h>
82 #include <linux/sched/mm.h>
83 
84 #include <linux/uaccess.h>
85 
86 /*
87  * There are five quota SMP locks:
88  * * dq_list_lock protects all lists with quotas and quota formats.
89  * * dquot->dq_dqb_lock protects data from dq_dqb
90  * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
91  *   consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
92  *   dquot_transfer() can stabilize amount it transfers
93  * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
94  *   pointers in the inode
95  * * dq_state_lock protects modifications of quota state (on quotaon and
96  *   quotaoff) and readers who care about latest values take it as well.
97  *
98  * The spinlock ordering is hence:
99  *   dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
100  *   dq_list_lock > dq_state_lock
101  *
102  * Note that some things (eg. sb pointer, type, id) doesn't change during
103  * the life of the dquot structure and so needn't to be protected by a lock
104  *
105  * Operation accessing dquots via inode pointers are protected by dquot_srcu.
106  * Operation of reading pointer needs srcu_read_lock(&dquot_srcu), and
107  * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
108  * inode and before dropping dquot references to avoid use of dquots after
109  * they are freed. dq_data_lock is used to serialize the pointer setting and
110  * clearing operations.
111  * Special care needs to be taken about S_NOQUOTA inode flag (marking that
112  * inode is a quota file). Functions adding pointers from inode to dquots have
113  * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
114  * have to do all pointer modifications before dropping dq_data_lock. This makes
115  * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
116  * then drops all pointers to dquots from an inode.
117  *
118  * Each dquot has its dq_lock mutex.  Dquot is locked when it is being read to
119  * memory (or space for it is being allocated) on the first dqget(), when it is
120  * being written out, and when it is being released on the last dqput(). The
121  * allocation and release operations are serialized by the dq_lock and by
122  * checking the use count in dquot_release().
123  *
124  * Lock ordering (including related VFS locks) is the following:
125  *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
126  */
127 
128 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
129 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
130 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
131 EXPORT_SYMBOL(dq_data_lock);
132 DEFINE_STATIC_SRCU(dquot_srcu);
133 
134 static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);
135 
__quota_error(struct super_block * sb,const char * func,const char * fmt,...)136 void __quota_error(struct super_block *sb, const char *func,
137 		   const char *fmt, ...)
138 {
139 	if (printk_ratelimit()) {
140 		va_list args;
141 		struct va_format vaf;
142 
143 		va_start(args, fmt);
144 
145 		vaf.fmt = fmt;
146 		vaf.va = &args;
147 
148 		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
149 		       sb->s_id, func, &vaf);
150 
151 		va_end(args);
152 	}
153 }
154 EXPORT_SYMBOL(__quota_error);
155 
156 #if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
157 static char *quotatypes[] = INITQFNAMES;
158 #endif
159 static struct quota_format_type *quota_formats;	/* List of registered formats */
160 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
161 
162 /* SLAB cache for dquot structures */
163 static struct kmem_cache *dquot_cachep;
164 
/* Workqueue used by quota_release_work */
166 static struct workqueue_struct *quota_unbound_wq;
167 
/* Add a quota format to the head of the global list of registered formats */
void register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(register_quota_format);
176 
/*
 * Unlink a quota format from the global singly-linked format list.
 * The walk uses a pointer-to-pointer so removing the head element
 * needs no special casing.
 */
void unregister_quota_format(struct quota_format_type *fmt)
{
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
	     actqf = &(*actqf)->qf_next)
		;
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(unregister_quota_format);
190 
/*
 * Find a registered quota format by id and take a reference to the module
 * implementing it. If the format is not registered, try loading the
 * corresponding module via request_module() and search again.
 * Returns NULL if no matching format can be found or its module is going
 * away (try_module_get() failing).
 */
static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		spin_unlock(&dq_list_lock);

		/* Map the format id to a module name and request its load */
		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		/* Retry the lookup now that the module may be loaded */
		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}
221 
/* Drop the module reference taken by find_quota_format() */
static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}
226 
227 /*
228  * Dquot List Management:
229  * The quota code uses five lists for dquot management: the inuse_list,
230  * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
231  * A single dquot structure may be on some of those lists, depending on
232  * its current state.
233  *
234  * All dquots are placed to the end of inuse_list when first created, and this
235  * list is used for invalidate operation, which must look at every dquot.
236  *
237  * When the last reference of a dquot is dropped, the dquot is added to
238  * releasing_dquots. We'll then queue work item which will call
239  * synchronize_srcu() and after that perform the final cleanup of all the
240  * dquots on the list. Each cleaned up dquot is moved to free_dquots list.
241  * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
242  * struct.
243  *
244  * Unused and cleaned up dquots are in the free_dquots list and this list is
245  * searched whenever we need an available dquot. Dquots are removed from the
246  * list as soon as they are used again and dqstats.free_dquots gives the number
247  * of dquots on the list. When dquot is invalidated it's completely released
248  * from memory.
249  *
250  * Dirty dquots are added to the dqi_dirty_list of quota_info when mark
251  * dirtied, and this list is searched when writing dirty dquots back to
252  * quota file. Note that some filesystems do dirty dquot tracking on their
253  * own (e.g. in a journal) and thus don't use dqi_dirty_list.
254  *
255  * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
257  * mechanism to locate a specific dquot.
258  */
259 
260 static LIST_HEAD(inuse_list);
261 static LIST_HEAD(free_dquots);
262 static LIST_HEAD(releasing_dquots);
263 static unsigned int dq_hash_bits, dq_hash_mask;
264 static struct hlist_head *dquot_hash;
265 
266 struct dqstats dqstats;
267 EXPORT_SYMBOL(dqstats);
268 
269 static qsize_t inode_get_rsv_space(struct inode *inode);
270 static qsize_t __inode_get_rsv_space(struct inode *inode);
271 static int __dquot_initialize(struct inode *inode, int type);
272 
273 static void quota_release_workfn(struct work_struct *work);
274 static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);
275 
/*
 * Hash (sb, qid) into an index of the dquot_hash[] array. The quota type
 * is mixed in through the multiplier so equal ids of different types tend
 * to land in different chains.
 */
static inline unsigned int
hashfn(const struct super_block *sb, struct kqid qid)
{
	unsigned int id = from_kqid(&init_user_ns, qid);
	int type = qid.type;
	unsigned long tmp;

	tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}
286 
287 /*
288  * Following list functions expect dq_list_lock to be held
289  */
insert_dquot_hash(struct dquot * dquot)290 static inline void insert_dquot_hash(struct dquot *dquot)
291 {
292 	struct hlist_head *head;
293 	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
294 	hlist_add_head(&dquot->dq_hash, head);
295 }
296 
/* Unhash the dquot; hlist_del_init() leaves the node safely reusable */
static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}
301 
find_dquot(unsigned int hashent,struct super_block * sb,struct kqid qid)302 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
303 				struct kqid qid)
304 {
305 	struct dquot *dquot;
306 
307 	hlist_for_each_entry(dquot, dquot_hash+hashent, dq_hash)
308 		if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
309 			return dquot;
310 
311 	return NULL;
312 }
313 
/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	/* free_dquots length is tracked in dqstats for the shrinker */
	dqstats_inc(DQST_FREE_DQUOTS);
}
320 
/*
 * Queue a dquot on the releasing list for deferred cleanup by
 * quota_release_workfn(). DQ_RELEASING_B marks that the final cleanup
 * has not happened yet.
 */
static inline void put_releasing_dquots(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &releasing_dquots);
	set_bit(DQ_RELEASING_B, &dquot->dq_flags);
}
326 
/*
 * Remove a dquot from whichever of free_dquots / releasing_dquots it is
 * on (both lists share the dq_free list head). Only the free list is
 * counted in DQST_FREE_DQUOTS, so decrement the stat only when the dquot
 * was not in the releasing state.
 */
static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
		dqstats_dec(DQST_FREE_DQUOTS);
	else
		clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
}
337 
/*
 * Add a dquot to the in-use list. We add to the back of inuse list so we
 * don't have to restart when traversing this list and we block.
 */
static inline void put_inuse(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}
345 
/* Remove a dquot from the in-use list; caller holds dq_list_lock */
static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}
351 /*
352  * End of list functions needing dq_list_lock
353  */
354 
/*
 * Wait for any operation holding dquot->dq_lock (reading, instantiation
 * or release of the dquot) to finish, by briefly taking the mutex.
 */
static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}
360 
/* Is the dquot fully set up (DQ_ACTIVE_B set by dquot_acquire())? */
static inline int dquot_active(struct dquot *dquot)
{
	return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
}
365 
/*
 * Take a reference to the dquot. Caller must hold dq_list_lock. A dquot
 * gaining its first reference is pulled off the free list.
 */
static struct dquot *__dqgrab(struct dquot *dquot)
{
	lockdep_assert_held(&dq_list_lock);
	if (!atomic_read(&dquot->dq_count))
		remove_free_dquot(dquot);
	atomic_inc(&dquot->dq_count);
	return dquot;
}
374 
/*
 * Get reference to dquot when we got pointer to it by some other means. The
 * dquot has to be active and the caller has to make sure it cannot get
 * deactivated under our hands.
 */
struct dquot *dqgrab(struct dquot *dquot)
{
	spin_lock(&dq_list_lock);
	/* Only active dquots are expected here; warn (once) otherwise */
	WARN_ON_ONCE(!dquot_active(dquot));
	dquot = __dqgrab(dquot);
	spin_unlock(&dq_list_lock);

	return dquot;
}
EXPORT_SYMBOL_GPL(dqgrab);
390 
/* Does the dquot carry modifications not yet written back? */
static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}
395 
/* Dispatch to the filesystem's ->mark_dirty method */
static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}
400 
/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	/* Inactive dquots are never written back; nothing to mark */
	if (!dquot_active(dquot))
		return 0;

	/* Filesystems tracking dirty dquots themselves skip dqi_dirty_list */
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (dquot_dirty(dquot))
		return 1;

	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_id.type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
426 
/* Dirtify all the dquots - this can block when journalling */
static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
{
	int ret, err, cnt;
	struct dquot *dquot;

	ret = err = 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/* Caller is inside a dquot_srcu read section */
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot)
			/* Even in case of error we have to continue */
			ret = mark_dquot_dirty(dquot);
		if (!err && ret < 0)
			err = ret;
	}
	/* Return the first error seen, 0 when all types succeeded */
	return err;
}
444 
dqput_all(struct dquot ** dquot)445 static inline void dqput_all(struct dquot **dquot)
446 {
447 	unsigned int cnt;
448 
449 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
450 		dqput(dquot[cnt]);
451 }
452 
/*
 * Clear the dirty state of a dquot and unlink it from dqi_dirty_list
 * (unless the filesystem uses DQUOT_NOLIST_DIRTY). Returns 1 when the
 * dquot was dirty and we cleared it, 0 when it was already clean.
 */
static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);

	spin_lock(&dq_list_lock);
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		return 0;
	}
	list_del_init(&dquot->dq_dirty);
	spin_unlock(&dq_list_lock);
	return 1;
}
467 
/* Mark the per-type quota info as needing write-out to the quota file */
void mark_info_dirty(struct super_block *sb, int type)
{
	spin_lock(&dq_data_lock);
	sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
	spin_unlock(&dq_data_lock);
}
EXPORT_SYMBOL(mark_info_dirty);
475 
/*
 *	Read dquot from disk and alloc space for it
 *	Returns 0 on success, negative errno on read/commit failure.
 */
int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Quota file IO must not recurse back into the filesystem */
	memalloc = memalloc_nofs_save();
	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
		if (ret < 0)
			goto out_iolock;
	}
	/* Make sure flags update is visible after dquot has been filled */
	smp_mb__before_atomic();
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!dquot_active(dquot) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	/*
	 * Make sure flags update is visible after on-disk struct has been
	 * allocated. Paired with smp_rmb() in dqget().
	 */
	smp_mb__before_atomic();
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);
523 
/*
 *	Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	/* Someone else may have written it meanwhile; then nothing to do */
	if (!clear_dquot_dirty(dquot))
		goto out_lock;
	/* Inactive dquot can be only if there was error during read/init
	 * => we have better not writing it */
	if (dquot_active(dquot))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_lock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);
549 
/*
 *	Release dquot
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	/* Check whether we are not racing with some other dqget() */
	if (dquot_is_busy(dquot))
		goto out_dqlock;
	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
						dquot->dq_sb, dquot->dq_id.type);
		}
		/* Report the info write error only when release succeeded */
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_dqlock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);
581 
/* Default ->destroy_dquot: free the structure back to the slab cache */
void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);
587 
/* Destroy a dquot via the filesystem's ->destroy_dquot method */
static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}
592 
/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	/* Finish pending deferred releases so dquots reach a final state */
	flush_delayed_work(&quota_release_work);

	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_id.type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			atomic_inc(&dquot->dq_count);
			spin_unlock(&dq_list_lock);
			/*
			 * Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			wait_event(dquot_ref_wq,
				   atomic_read(&dquot->dq_count) == 1);
			dqput(dquot);
			/* At this moment dquot() need not exist (it could be
			 * reclaimed by prune_dqcache(). Hence we must
			 * restart. */
			goto restart;
		}
		/*
		 * The last user already dropped its reference but dquot didn't
		 * get fully cleaned up yet. Restart the scan which flushes the
		 * work cleaning up released dquots.
		 */
		if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
			spin_unlock(&dq_list_lock);
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}
652 
/*
 * Call callback for every active dquot on given filesystem.
 * Stops and returns the callback's error on the first negative return.
 */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!dquot_active(dquot))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		__dqgrab(dquot);
		spin_unlock(&dq_list_lock);
		/* Drop previous iteration's reference outside dq_list_lock */
		dqput(old_dquot);
		old_dquot = dquot;
		/*
		 * ->release_dquot() can be racing with us. Our reference
		 * protects us from dquot_release() proceeding so just wait for
		 * any outstanding call and recheck the DQ_ACTIVE_B after that.
		 */
		wait_on_dquot(dquot);
		if (dquot_active(dquot)) {
			ret = fn(dquot, priv);
			if (ret < 0)
				goto out;
		}
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);
694 
/*
 * Write one dquot via the filesystem's ->write_dquot method, logging a
 * (ratelimited) error message on failure.
 */
static inline int dquot_write_dquot(struct dquot *dquot)
{
	int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't write quota structure "
			    "(error %d). Quota may get out of sync!", ret);
		/* Clear dirty bit anyway to avoid infinite loop. */
		clear_dquot_dirty(dquot);
	}
	return ret;
}
706 
/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
	struct list_head dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int err, ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	/* Make sure deferred releases don't hold dquots we should write */
	flush_delayed_work(&quota_release_work);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		/* Move list away to avoid livelock. */
		list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
		while (!list_empty(&dirty)) {
			dquot = list_first_entry(&dirty, struct dquot,
						 dq_dirty);

			WARN_ON(!dquot_active(dquot));
			/* If the dquot is releasing we should not touch it */
			if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
				spin_unlock(&dq_list_lock);
				flush_delayed_work(&quota_release_work);
				spin_lock(&dq_list_lock);
				continue;
			}

			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			__dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			err = dquot_write_dquot(dquot);
			if (err && !ret)
				ret = err;
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	/* Write out the per-type quota info when it is dirty as well */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);

	return ret;
}
EXPORT_SYMBOL(dquot_writeback_dquots);
764 
/* Write all dquot structures to disk and make them visible from userspace */
int dquot_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;
	/* Hidden system quota files => nothing visible to refresh */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		return 0;

	/* This is not very clever (and fast) but currently I don't know about
	 * any other simple way of getting quota data to disk and we must get
	 * them there for userspace to be visible... */
	if (sb->s_op->sync_fs) {
		ret = sb->s_op->sync_fs(sb, 1);
		if (ret)
			return ret;
	}
	ret = sync_blockdev(sb->s_bdev);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);
807 
/* Memory shrinker scan: destroy up to sc->nr_to_scan unused dquots */
static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dquot *dquot;
	unsigned long freed = 0;

	spin_lock(&dq_list_lock);
	while (!list_empty(&free_dquots) && sc->nr_to_scan) {
		dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
		sc->nr_to_scan--;
		freed++;
	}
	spin_unlock(&dq_list_lock);
	return freed;
}
827 
/* Memory shrinker count: number of unused dquots available for freeing */
static unsigned long
dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return vfs_pressure_ratio(
	percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}
834 
/*
 * Safely release dquot and put reference to dquot.
 */
static void quota_release_workfn(struct work_struct *work)
{
	struct dquot *dquot;
	struct list_head rls_head;

	spin_lock(&dq_list_lock);
	/* Exchange the list head to avoid livelock. */
	list_replace_init(&releasing_dquots, &rls_head);
	spin_unlock(&dq_list_lock);
	/* Wait until no SRCU reader can still see these dquots via inodes */
	synchronize_srcu(&dquot_srcu);

restart:
	spin_lock(&dq_list_lock);
	while (!list_empty(&rls_head)) {
		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
		WARN_ON_ONCE(atomic_read(&dquot->dq_count));
		/*
		 * Note that DQ_RELEASING_B protects us from racing with
		 * invalidate_dquots() calls so we are safe to work with the
		 * dquot even after we drop dq_list_lock.
		 */
		if (dquot_dirty(dquot)) {
			spin_unlock(&dq_list_lock);
			/* Commit dquot before releasing */
			dquot_write_dquot(dquot);
			goto restart;
		}
		if (dquot_active(dquot)) {
			spin_unlock(&dq_list_lock);
			dquot->dq_sb->dq_op->release_dquot(dquot);
			goto restart;
		}
		/* Dquot is inactive and clean, now move it to free list */
		remove_free_dquot(dquot);
		put_dquot_last(dquot);
	}
	spin_unlock(&dq_list_lock);
}
876 
/*
 * Put reference to dquot
 */
void dqput(struct dquot *dquot)
{
	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_id.type],
			    from_kqid(&init_user_ns, dquot->dq_id));
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);

	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot_ref_wq);
		spin_unlock(&dq_list_lock);
		return;
	}

	/* Need to release dquot? */
	WARN_ON_ONCE(!list_empty(&dquot->dq_free));
	put_releasing_dquots(dquot);
	atomic_dec(&dquot->dq_count);
	spin_unlock(&dq_list_lock);
	/* 1-jiffy delay — presumably to batch several dqputs into one
	 * work-item run; the workfn drains the whole releasing list. */
	queue_delayed_work(quota_unbound_wq, &quota_release_work, 1);
}
EXPORT_SYMBOL(dqput);
914 
/* Default ->alloc_dquot: allocate a zeroed dquot from the slab cache */
struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);
920 
get_empty_dquot(struct super_block * sb,int type)921 static struct dquot *get_empty_dquot(struct super_block *sb, int type)
922 {
923 	struct dquot *dquot;
924 
925 	dquot = sb->dq_op->alloc_dquot(sb, type);
926 	if(!dquot)
927 		return NULL;
928 
929 	mutex_init(&dquot->dq_lock);
930 	INIT_LIST_HEAD(&dquot->dq_free);
931 	INIT_LIST_HEAD(&dquot->dq_inuse);
932 	INIT_HLIST_NODE(&dquot->dq_hash);
933 	INIT_LIST_HEAD(&dquot->dq_dirty);
934 	dquot->dq_sb = sb;
935 	dquot->dq_id = make_kqid_invalid(type);
936 	atomic_set(&dquot->dq_count, 1);
937 	spin_lock_init(&dquot->dq_dqb_lock);
938 
939 	return dquot;
940 }
941 
942 /*
943  * Get reference to dquot
944  *
945  * Locking is slightly tricky here. We are guarded from parallel quotaoff()
946  * destroying our dquot by:
947  *   a) checking for quota flags under dq_list_lock and
948  *   b) getting a reference to dquot before we release dq_list_lock
949  */
struct dquot *dqget(struct super_block *sb, struct kqid qid)
{
	unsigned int hashent = hashfn(sb, qid);
	struct dquot *dquot, *empty = NULL;

	if (!qid_has_mapping(sb->s_user_ns, qid))
		return ERR_PTR(-EINVAL);

	/* Unlocked fast-path check; rechecked under locks below. */
        if (!sb_has_quota_active(sb, qid.type))
		return ERR_PTR(-ESRCH);
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	/* Recheck with dq_state_lock held - we may have raced with quotaoff. */
	if (!sb_has_quota_active(sb, qid.type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		dquot = ERR_PTR(-ESRCH);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, qid);
	if (!dquot) {
		if (!empty) {
			/* Allocate outside dq_list_lock, then redo the lookup. */
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, qid.type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = qid;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
	} else {
		/* Found in the hash - grab a reference under dq_list_lock. */
		__dqgrab(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_CACHE_HITS);
		dqstats_inc(DQST_LOOKUPS);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 0 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!dquot_active(dquot)) {
		int err;

		err = sb->dq_op->acquire_dquot(dquot);
		if (err < 0) {
			dqput(dquot);
			dquot = ERR_PTR(err);
			goto out;
		}
	}
	/*
	 * Make sure following reads see filled structure - paired with
	 * smp_mb__before_atomic() in dquot_acquire().
	 */
	smp_rmb();
	/* Has somebody invalidated entry under us? */
	WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash));
out:
	/* Free the preallocated dquot if the retry loop did not consume it. */
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);
1023 
i_dquot(struct inode * inode)1024 static inline struct dquot __rcu **i_dquot(struct inode *inode)
1025 {
1026 	return inode->i_sb->s_op->get_dquots(inode);
1027 }
1028 
dqinit_needed(struct inode * inode,int type)1029 static int dqinit_needed(struct inode *inode, int type)
1030 {
1031 	struct dquot __rcu * const *dquots;
1032 	int cnt;
1033 
1034 	if (IS_NOQUOTA(inode))
1035 		return 0;
1036 
1037 	dquots = i_dquot(inode);
1038 	if (type != -1)
1039 		return !dquots[type];
1040 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1041 		if (!dquots[cnt])
1042 			return 1;
1043 	return 0;
1044 }
1045 
1046 /* This routine is guarded by s_umount semaphore */
/*
 * Walk all inodes of @sb and attach dquots of @type to inodes open for
 * write that still need them. Returns 0 or the first error from
 * __dquot_initialize().
 */
static int add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif
	int err = 0;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/*
		 * Skip inodes being freed or not yet set up, inodes nobody
		 * has open for write, and inodes that already have all the
		 * dquot pointers they need.
		 */
		if ((inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) ||
		    !atomic_read(&inode->i_writecount) ||
		    !dqinit_needed(inode, type)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* Pin the inode so we may drop s_inode_list_lock and block. */
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

#ifdef CONFIG_QUOTA_DEBUG
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		iput(old_inode);
		err = __dquot_initialize(inode, type);
		if (err) {
			iput(inode);
			goto out;
		}

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can be
		 * holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		old_inode = inode;
		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(old_inode);
out:
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			"thus quota information is probably inconsistent. "
			"Please run quotacheck(8)");
	}
#endif
	return err;
}
1103 
/*
 * Clear the @type dquot pointer of every inode on @sb and drop the
 * references those pointers held. Used when quota is being turned off.
 */
static void remove_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 *  We have to scan also I_NEW inodes because they can already
		 *  have quota pointer initialized. Luckily, we need to touch
		 *  only quota pointers and these have separate locking
		 *  (dq_data_lock).
		 */
		spin_lock(&dq_data_lock);
		if (!IS_NOQUOTA(inode)) {
			struct dquot __rcu **dquots = i_dquot(inode);
			struct dquot *dquot = srcu_dereference_check(
				dquots[type], &dquot_srcu,
				lockdep_is_held(&dq_data_lock));

#ifdef CONFIG_QUOTA_DEBUG
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
#endif
			/* Detach first, then drop the reference we now own. */
			rcu_assign_pointer(dquots[type], NULL);
			if (dquot)
				dqput(dquot);
		}
		spin_unlock(&dq_data_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}
1145 
1146 /* Gather all references from inodes and drop them */
drop_dquot_ref(struct super_block * sb,int type)1147 static void drop_dquot_ref(struct super_block *sb, int type)
1148 {
1149 	if (sb->dq_op)
1150 		remove_dquot_ref(sb, type);
1151 }
1152 
1153 static inline
dquot_free_reserved_space(struct dquot * dquot,qsize_t number)1154 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
1155 {
1156 	if (dquot->dq_dqb.dqb_rsvspace >= number)
1157 		dquot->dq_dqb.dqb_rsvspace -= number;
1158 	else {
1159 		WARN_ON_ONCE(1);
1160 		dquot->dq_dqb.dqb_rsvspace = 0;
1161 	}
1162 	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1163 	    dquot->dq_dqb.dqb_bsoftlimit)
1164 		dquot->dq_dqb.dqb_btime = (time64_t) 0;
1165 	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1166 }
1167 
/* Subtract @number from the inode usage of @dquot, clamping at zero unless
 * negative usage is explicitly permitted for this filesystem. */
static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
	bool allow_negative = sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE;

	if (allow_negative || dquot->dq_dqb.dqb_curinodes >= number)
		dquot->dq_dqb.dqb_curinodes -= number;
	else
		dquot->dq_dqb.dqb_curinodes = 0;
	/* Clear grace time when back at/below the soft limit. */
	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
		dquot->dq_dqb.dqb_itime = (time64_t)0;
	/* Always re-arm the inode warning. */
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
}
1179 
/* Subtract @number bytes from the space usage of @dquot, clamping at zero
 * unless negative usage is explicitly permitted for this filesystem. */
static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	bool allow_negative = sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE;

	if (allow_negative || dquot->dq_dqb.dqb_curspace >= number)
		dquot->dq_dqb.dqb_curspace -= number;
	else
		dquot->dq_dqb.dqb_curspace = 0;
	/* Clear grace time when back at/below the soft limit. */
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t)0;
	/* Always re-arm the block warning. */
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}
1192 
/*
 * Deferred quota warning: filled in by prepare_warning() while locks are
 * held, delivered later by flush_warnings() where blocking is allowed.
 */
struct dquot_warn {
	struct super_block *w_sb;	/* filesystem the warning is for */
	struct kqid w_dq_id;		/* quota id (type + uid/gid/projid) */
	short w_type;			/* QUOTA_NL_* type, or QUOTA_NL_NOWARN */
};
1198 
warning_issued(struct dquot * dquot,const int warntype)1199 static int warning_issued(struct dquot *dquot, const int warntype)
1200 {
1201 	int flag = (warntype == QUOTA_NL_BHARDWARN ||
1202 		warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
1203 		((warntype == QUOTA_NL_IHARDWARN ||
1204 		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
1205 
1206 	if (!flag)
1207 		return 0;
1208 	return test_and_set_bit(flag, &dquot->dq_flags);
1209 }
1210 
1211 #ifdef CONFIG_PRINT_QUOTA_WARNING
/* When zero, console quota warnings are suppressed (see need_print_warning()). */
static int flag_print_warnings = 1;
1213 
need_print_warning(struct dquot_warn * warn)1214 static int need_print_warning(struct dquot_warn *warn)
1215 {
1216 	if (!flag_print_warnings)
1217 		return 0;
1218 
1219 	switch (warn->w_dq_id.type) {
1220 		case USRQUOTA:
1221 			return uid_eq(current_fsuid(), warn->w_dq_id.uid);
1222 		case GRPQUOTA:
1223 			return in_group_p(warn->w_dq_id.gid);
1224 		case PRJQUOTA:
1225 			return 1;
1226 	}
1227 	return 0;
1228 }
1229 
1230 /* Print warning to user which exceeded quota */
print_warning(struct dquot_warn * warn)1231 static void print_warning(struct dquot_warn *warn)
1232 {
1233 	char *msg = NULL;
1234 	struct tty_struct *tty;
1235 	int warntype = warn->w_type;
1236 
1237 	if (warntype == QUOTA_NL_IHARDBELOW ||
1238 	    warntype == QUOTA_NL_ISOFTBELOW ||
1239 	    warntype == QUOTA_NL_BHARDBELOW ||
1240 	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
1241 		return;
1242 
1243 	tty = get_current_tty();
1244 	if (!tty)
1245 		return;
1246 	tty_write_message(tty, warn->w_sb->s_id);
1247 	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
1248 		tty_write_message(tty, ": warning, ");
1249 	else
1250 		tty_write_message(tty, ": write failed, ");
1251 	tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
1252 	switch (warntype) {
1253 		case QUOTA_NL_IHARDWARN:
1254 			msg = " file limit reached.\r\n";
1255 			break;
1256 		case QUOTA_NL_ISOFTLONGWARN:
1257 			msg = " file quota exceeded too long.\r\n";
1258 			break;
1259 		case QUOTA_NL_ISOFTWARN:
1260 			msg = " file quota exceeded.\r\n";
1261 			break;
1262 		case QUOTA_NL_BHARDWARN:
1263 			msg = " block limit reached.\r\n";
1264 			break;
1265 		case QUOTA_NL_BSOFTLONGWARN:
1266 			msg = " block quota exceeded too long.\r\n";
1267 			break;
1268 		case QUOTA_NL_BSOFTWARN:
1269 			msg = " block quota exceeded.\r\n";
1270 			break;
1271 	}
1272 	tty_write_message(tty, msg);
1273 	tty_kref_put(tty);
1274 }
1275 #endif
1276 
prepare_warning(struct dquot_warn * warn,struct dquot * dquot,int warntype)1277 static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
1278 			    int warntype)
1279 {
1280 	if (warning_issued(dquot, warntype))
1281 		return;
1282 	warn->w_type = warntype;
1283 	warn->w_sb = dquot->dq_sb;
1284 	warn->w_dq_id = dquot->dq_id;
1285 }
1286 
1287 /*
1288  * Write warnings to the console and send warning messages over netlink.
1289  *
1290  * Note that this function can call into tty and networking code.
1291  */
flush_warnings(struct dquot_warn * warn)1292 static void flush_warnings(struct dquot_warn *warn)
1293 {
1294 	int i;
1295 
1296 	for (i = 0; i < MAXQUOTAS; i++) {
1297 		if (warn[i].w_type == QUOTA_NL_NOWARN)
1298 			continue;
1299 #ifdef CONFIG_PRINT_QUOTA_WARNING
1300 		print_warning(&warn[i]);
1301 #endif
1302 		quota_send_warning(warn[i].w_dq_id,
1303 				   warn[i].w_sb->s_dev, warn[i].w_type);
1304 	}
1305 }
1306 
ignore_hardlimit(struct dquot * dquot)1307 static int ignore_hardlimit(struct dquot *dquot)
1308 {
1309 	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
1310 
1311 	return capable(CAP_SYS_RESOURCE) &&
1312 	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1313 		!(info->dqi_flags & DQF_ROOT_SQUASH));
1314 }
1315 
/*
 * Charge @inodes inodes to @dquot, enforcing inode limits.
 *
 * Returns 0 on success or -EDQUOT when a limit would be exceeded; in that
 * case a warning is prepared in @warn for later delivery. Limit checks are
 * skipped for fake quotas and when limits are not enabled. Takes
 * dquot->dq_dqb_lock.
 */
static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
			    struct dquot_warn *warn)
{
	qsize_t newinodes;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto add;

	/* Hard limit: fail unless privileged (see ignore_hardlimit()). */
	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
            !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
		ret = -EDQUOT;
		goto out;
	}

	/* Soft limit exceeded and grace time already expired: fail too. */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
            !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
		ret = -EDQUOT;
		goto out;
	}

	/* First crossing of the soft limit: warn and start the grace time. */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
		dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
	}
add:
	dquot->dq_dqb.dqb_curinodes = newinodes;

out:
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}
1360 
/*
 * Charge @space bytes (plus @rsv_space reserved bytes) to @dquot, enforcing
 * block limits.
 *
 * Returns 0 on success or -EDQUOT; on failure a warning may be prepared in
 * @warn (only when DQUOT_SPACE_WARN is set in @flags). DQUOT_SPACE_NOFAIL
 * forces success, but only after warnings and grace times have been
 * processed. Takes dquot->dq_dqb_lock.
 */
static int dquot_add_space(struct dquot *dquot, qsize_t space,
			   qsize_t rsv_space, unsigned int flags,
			   struct dquot_warn *warn)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto finish;

	/* Total usage after the charge: current + reserved + new. */
	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space + rsv_space;

	/* Hard limit: fail unless privileged (see ignore_hardlimit()). */
	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
            !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
		ret = -EDQUOT;
		goto finish;
	}

	/* Soft limit exceeded and grace time already expired: fail too. */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
            !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
		ret = -EDQUOT;
		goto finish;
	}

	/* First crossing of the soft limit. */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (flags & DQUOT_SPACE_WARN) {
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
			dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
		} else {
			/*
			 * We don't allow preallocation to exceed softlimit so exceeding will
			 * be always printed
			 */
			ret = -EDQUOT;
			goto finish;
		}
	}
finish:
	/*
	 * We have to be careful and go through warning generation & grace time
	 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
	 * only here...
	 */
	if (flags & DQUOT_SPACE_NOFAIL)
		ret = 0;
	if (!ret) {
		dquot->dq_dqb.dqb_rsvspace += rsv_space;
		dquot->dq_dqb.dqb_curspace += space;
	}
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}
1428 
/*
 * Which "back below limit" event (if any) will freeing @inodes inodes
 * trigger for @dquot? Returns a QUOTA_NL_* value.
 */
static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
	qsize_t after;

	/* Fake quotas, disabled limits, or usage already at/below the soft
	 * limit never produce a notification. */
	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
		return QUOTA_NL_NOWARN;

	after = dquot->dq_dqb.dqb_curinodes - inodes;
	if (after <= dquot->dq_dqb.dqb_isoftlimit)
		return QUOTA_NL_ISOFTBELOW;
	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
	    after < dquot->dq_dqb.dqb_ihardlimit)
		return QUOTA_NL_IHARDBELOW;
	return QUOTA_NL_NOWARN;
}
1446 
/*
 * Which "back below limit" event (if any) will freeing @space bytes
 * trigger for @dquot? Returns a QUOTA_NL_* value.
 */
static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
	qsize_t used = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;

	/* Fake quotas or usage already at/below the soft limit: no event. */
	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    used <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_NOWARN;

	if (used - space <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_BSOFTBELOW;
	if (used >= dquot->dq_dqb.dqb_bhardlimit &&
	    used - space < dquot->dq_dqb.dqb_bhardlimit)
		return QUOTA_NL_BHARDBELOW;
	return QUOTA_NL_NOWARN;
}
1464 
inode_quota_active(const struct inode * inode)1465 static int inode_quota_active(const struct inode *inode)
1466 {
1467 	struct super_block *sb = inode->i_sb;
1468 
1469 	if (IS_NOQUOTA(inode))
1470 		return 0;
1471 	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
1472 }
1473 
1474 /*
1475  * Initialize quota pointers in inode
1476  *
1477  * It is better to call this function outside of any transaction as it
1478  * might need a lot of space in journal for dquot structure allocation.
1479  */
static int __dquot_initialize(struct inode *inode, int type)
{
	int cnt, init_needed = 0;
	struct dquot __rcu **dquots;
	struct dquot *got[MAXQUOTAS] = {};
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;
	int ret = 0;

	if (!inode_quota_active(inode))
		return 0;

	dquots = i_dquot(inode);

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct kqid qid;
		kprojid_t projid;
		int rc;
		struct dquot *dquot;

		if (type != -1 && cnt != type)
			continue;
		/*
		 * The i_dquot should have been initialized in most cases,
		 * we check it without locking here to avoid unnecessary
		 * dqget()/dqput() calls.
		 */
		if (dquots[cnt])
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		init_needed = 1;

		/* Build the quota id this inode belongs to for type @cnt. */
		switch (cnt) {
		case USRQUOTA:
			qid = make_kqid_uid(inode->i_uid);
			break;
		case GRPQUOTA:
			qid = make_kqid_gid(inode->i_gid);
			break;
		case PRJQUOTA:
			rc = inode->i_sb->dq_op->get_projid(inode, &projid);
			if (rc)
				continue;
			qid = make_kqid_projid(projid);
			break;
		}
		dquot = dqget(sb, qid);
		if (IS_ERR(dquot)) {
			/* We raced with somebody turning quotas off... */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		got[cnt] = dquot;
	}

	/* All required i_dquot has been initialized */
	if (!init_needed)
		return 0;

	spin_lock(&dq_data_lock);
	if (IS_NOQUOTA(inode))
		goto out_lock;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!dquots[cnt]) {
			/* Publish the dquot pointer; ownership moves to inode. */
			rcu_assign_pointer(dquots[cnt], got[cnt]);
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv)) {
				struct dquot *dquot = srcu_dereference_check(
					dquots[cnt], &dquot_srcu,
					lockdep_is_held(&dq_data_lock));

				spin_lock(&inode->i_lock);
				/* Get reservation again under proper lock */
				rsv = __inode_get_rsv_space(inode);
				spin_lock(&dquot->dq_dqb_lock);
				dquot->dq_dqb.dqb_rsvspace += rsv;
				spin_unlock(&dquot->dq_dqb_lock);
				spin_unlock(&inode->i_lock);
			}
		}
	}
out_lock:
	spin_unlock(&dq_data_lock);
out_put:
	/* Drop unused references */
	dqput_all(got);

	return ret;
}
1589 
/* Attach dquots of every quota type to @inode. */
int dquot_initialize(struct inode *inode)
{
	return __dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);
1595 
dquot_initialize_needed(struct inode * inode)1596 bool dquot_initialize_needed(struct inode *inode)
1597 {
1598 	struct dquot __rcu **dquots;
1599 	int i;
1600 
1601 	if (!inode_quota_active(inode))
1602 		return false;
1603 
1604 	dquots = i_dquot(inode);
1605 	for (i = 0; i < MAXQUOTAS; i++)
1606 		if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
1607 			return true;
1608 	return false;
1609 }
1610 EXPORT_SYMBOL(dquot_initialize_needed);
1611 
1612 /*
1613  * Release all quotas referenced by inode.
1614  *
1615  * This function only be called on inode free or converting
1616  * a file to quota file, no other users for the i_dquot in
1617  * both cases, so we needn't call synchronize_srcu() after
1618  * clearing i_dquot.
1619  */
static void __dquot_drop(struct inode *inode)
{
	int cnt;
	struct dquot __rcu **dquots = i_dquot(inode);
	struct dquot *put[MAXQUOTAS];

	/* Detach all pointers under dq_data_lock, then dqput() outside it. */
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
					lockdep_is_held(&dq_data_lock));
		rcu_assign_pointer(dquots[cnt], NULL);
	}
	spin_unlock(&dq_data_lock);
	dqput_all(put);
}
1635 
dquot_drop(struct inode * inode)1636 void dquot_drop(struct inode *inode)
1637 {
1638 	struct dquot __rcu * const *dquots;
1639 	int cnt;
1640 
1641 	if (IS_NOQUOTA(inode))
1642 		return;
1643 
1644 	/*
1645 	 * Test before calling to rule out calls from proc and such
1646 	 * where we are not allowed to block. Note that this is
1647 	 * actually reliable test even without the lock - the caller
1648 	 * must assure that nobody can come after the DQUOT_DROP and
1649 	 * add quota pointers back anyway.
1650 	 */
1651 	dquots = i_dquot(inode);
1652 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1653 		if (dquots[cnt])
1654 			break;
1655 	}
1656 
1657 	if (cnt < MAXQUOTAS)
1658 		__dquot_drop(inode);
1659 }
1660 EXPORT_SYMBOL(dquot_drop);
1661 
1662 /*
1663  * inode_reserved_space is managed internally by quota, and protected by
1664  * i_lock similar to i_blocks+i_bytes.
1665  */
inode_reserved_space(struct inode * inode)1666 static qsize_t *inode_reserved_space(struct inode * inode)
1667 {
1668 	/* Filesystem must explicitly define it's own method in order to use
1669 	 * quota reservation interface */
1670 	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
1671 	return inode->i_sb->dq_op->get_reserved_space(inode);
1672 }
1673 
__inode_get_rsv_space(struct inode * inode)1674 static qsize_t __inode_get_rsv_space(struct inode *inode)
1675 {
1676 	if (!inode->i_sb->dq_op->get_reserved_space)
1677 		return 0;
1678 	return *inode_reserved_space(inode);
1679 }
1680 
inode_get_rsv_space(struct inode * inode)1681 static qsize_t inode_get_rsv_space(struct inode *inode)
1682 {
1683 	qsize_t ret;
1684 
1685 	if (!inode->i_sb->dq_op->get_reserved_space)
1686 		return 0;
1687 	spin_lock(&inode->i_lock);
1688 	ret = __inode_get_rsv_space(inode);
1689 	spin_unlock(&inode->i_lock);
1690 	return ret;
1691 }
1692 
1693 /*
1694  * This functions updates i_blocks+i_bytes fields and quota information
1695  * (together with appropriate checks).
1696  *
1697  * NOTE: We absolutely rely on the fact that caller dirties the inode
1698  * (usually helpers in quotaops.h care about this) and holds a handle for
1699  * the current transaction so that dquot write and inode write go into the
1700  * same transaction.
1701  */
1702 
1703 /*
1704  * This operation can block, but only after everything is updated
1705  */
/*
 * Charge @number bytes against all dquots attached to @inode and update
 * i_blocks/i_bytes (or the reserved-space counter when DQUOT_SPACE_RESERVE
 * is set in @flags). On -EDQUOT, charges already made to earlier dquots are
 * rolled back. Returns 0 or a negative errno.
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	int reserve = flags & DQUOT_SPACE_RESERVE;
	struct dquot __rcu **dquots;
	struct dquot *dquot;

	if (!inode_quota_active(inode)) {
		/* No quota accounting - just update the inode counters. */
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) += number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_add_bytes(inode, number);
		}
		goto out;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		if (reserve) {
			ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
		} else {
			ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
		}
		if (ret) {
			/* Back out changes we already did */
			for (cnt--; cnt >= 0; cnt--) {
				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
				if (!dquot)
					continue;
				spin_lock(&dquot->dq_dqb_lock);
				if (reserve)
					dquot_free_reserved_space(dquot, number);
				else
					dquot_decr_space(dquot, number);
				spin_unlock(&dquot->dq_dqb_lock);
			}
			spin_unlock(&inode->i_lock);
			goto out_flush_warn;
		}
	}
	/* All dquots charged - now update the inode itself. */
	if (reserve)
		*inode_reserved_space(inode) += number;
	else
		__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	/* Reservations do not dirty the dquots - only real allocations do. */
	if (reserve)
		goto out_flush_warn;
	ret = mark_all_dquot_dirty(dquots);
out_flush_warn:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);
1773 
1774 /*
1775  * This operation can block, but only after everything is updated
1776  */
/*
 * Charge one inode against all dquots attached to @inode. On -EDQUOT,
 * charges already made to earlier dquots are rolled back. Returns 0 or a
 * negative errno.
 */
int dquot_alloc_inode(struct inode *inode)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu * const *dquots;
	struct dquot *dquot;

	if (!inode_quota_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
		if (ret) {
			for (cnt--; cnt >= 0; cnt--) {
				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
				if (!dquot)
					continue;
				/* Back out changes we already did */
				spin_lock(&dquot->dq_dqb_lock);
				dquot_decr_inodes(dquot, 1);
				spin_unlock(&dquot->dq_dqb_lock);
			}
			goto warn_put_all;
		}
	}

warn_put_all:
	spin_unlock(&inode->i_lock);
	if (ret == 0)
		ret = mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);
1820 
1821 /*
1822  * Convert in-memory reserved quotas to real consumed quotas
1823  */
/*
 * Convert @number bytes of reserved quota into consumed quota on all dquots
 * attached to @inode, and move the inode's reserved space into
 * i_blocks/i_bytes accordingly. No limit checks - the space was already
 * reserved.
 */
void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) -= number;
		__inode_add_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot) {
			spin_lock(&dquot->dq_dqb_lock);
			/* Claiming more than reserved is an accounting bug;
			 * clamp to what is actually reserved. */
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
				number = dquot->dq_dqb.dqb_rsvspace;
			dquot->dq_dqb.dqb_curspace += number;
			dquot->dq_dqb.dqb_rsvspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);
1861 
1862 /*
1863  * Convert allocated space back to in-memory reserved quotas
1864  */
/*
 * Convert @number bytes of consumed quota back into reserved quota on all
 * dquots attached to @inode, and move the bytes from i_blocks/i_bytes back
 * to the inode's reserved-space counter.
 */
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) += number;
		__inode_sub_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Return allocated quotas back to reservations */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot) {
			spin_lock(&dquot->dq_dqb_lock);
			/* Reclaiming more than is allocated is an accounting
			 * bug; clamp to what is actually allocated. */
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
				number = dquot->dq_dqb.dqb_curspace;
			dquot->dq_dqb.dqb_rsvspace += number;
			dquot->dq_dqb.dqb_curspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) += number;
	__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
1902 
1903 /*
1904  * This operation can block, but only after everything is updated
1905  */
/*
 * Release @number bytes from all dquots attached to @inode and update
 * i_blocks/i_bytes (or the reserved-space counter when DQUOT_SPACE_RESERVE
 * is set in @flags). May emit "back below limit" notifications.
 */
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int reserve = flags & DQUOT_SPACE_RESERVE, index;

	if (!inode_quota_active(inode)) {
		/* No quota accounting - just update the inode counters. */
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) -= number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_sub_bytes(inode, number);
		}
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		spin_lock(&dquot->dq_dqb_lock);
		/* Check for "back below limit" events before decrementing. */
		wtype = info_bdq_free(dquot, number);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquot, wtype);
		if (reserve)
			dquot_free_reserved_space(dquot, number);
		else
			dquot_decr_space(dquot, number);
		spin_unlock(&dquot->dq_dqb_lock);
	}
	if (reserve)
		*inode_reserved_space(inode) -= number;
	else
		__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	/* Reservations do not dirty the dquots - only real frees do. */
	if (reserve)
		goto out_unlock;
	mark_all_dquot_dirty(dquots);
out_unlock:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
}
EXPORT_SYMBOL(__dquot_free_space);
1959 
/*
 * This operation can block, but only after everything is updated
 *
 * Release one inode from each dquot attached to @inode and mark the
 * dquots dirty. Warnings are gathered under the locks and flushed last.
 */
void dquot_free_inode(struct inode *inode)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu * const *dquots;
	struct dquot *dquot;
	int index;

	if (!inode_quota_active(inode))
		return;

	dquots = i_dquot(inode);
	/* dquot_srcu keeps the dquot pointers stable against quotaoff */
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;
		warn[cnt].w_type = QUOTA_NL_NOWARN;
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		spin_lock(&dquot->dq_dqb_lock);
		wtype = info_idq_free(dquot, 1);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquot, wtype);
		dquot_decr_inodes(dquot, 1);
		spin_unlock(&dquot->dq_dqb_lock);
	}
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	/* Warnings may block (netlink) - only after all locks are dropped */
	flush_warnings(warn);
}
EXPORT_SYMBOL(dquot_free_inode);
1996 
/*
 * Transfer the number of inode and blocks from one diskquota to an other.
 * On success, dquot references in transfer_to are consumed and references
 * to original dquots that need to be released are placed there. On failure,
 * references are kept untouched.
 *
 * This operation can block, but only after everything is updated
 * A transaction must be started when entering this function.
 *
 * We are holding reference on transfer_from & transfer_to, no need to
 * protect them by srcu_read_lock().
 *
 * Structure: phase 1 charges the target dquots (with limit checks),
 * phase 2 uncharges the sources and swaps the inode's dquot pointers.
 * Any failure in phase 1 is rolled back under the over_quota label.
 */
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
{
	qsize_t cur_space;
	qsize_t rsv_space = 0;
	qsize_t inode_usage = 1;
	struct dquot __rcu **dquots;
	struct dquot *transfer_from[MAXQUOTAS] = {};
	int cnt, index, ret = 0, err;
	char is_valid[MAXQUOTAS] = {};
	struct dquot_warn warn_to[MAXQUOTAS];
	struct dquot_warn warn_from_inodes[MAXQUOTAS];
	struct dquot_warn warn_from_space[MAXQUOTAS];

	if (IS_NOQUOTA(inode))
		return 0;

	/* Filesystem may account this inode as more than one inode unit */
	if (inode->i_sb->dq_op->get_inode_usage) {
		ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
		if (ret)
			return ret;
	}

	/* Initialize the arrays */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		warn_to[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
	}

	spin_lock(&dq_data_lock);
	spin_lock(&inode->i_lock);
	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
		spin_unlock(&inode->i_lock);
		spin_unlock(&dq_data_lock);
		return 0;
	}
	cur_space = __inode_get_bytes(inode);
	rsv_space = __inode_get_rsv_space(inode);
	dquots = i_dquot(inode);
	/*
	 * Build the transfer_from list, check limits, and update usage in
	 * the target structures.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/*
		 * Skip changes for same uid or gid or for turned off quota-type.
		 */
		if (!transfer_to[cnt])
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(inode->i_sb, cnt))
			continue;
		is_valid[cnt] = 1;
		transfer_from[cnt] = srcu_dereference_check(dquots[cnt],
				&dquot_srcu, lockdep_is_held(&dq_data_lock));
		ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
				       &warn_to[cnt]);
		if (ret)
			goto over_quota;
		ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
				      DQUOT_SPACE_WARN, &warn_to[cnt]);
		if (ret) {
			/* Undo the inode charge made just above */
			spin_lock(&transfer_to[cnt]->dq_dqb_lock);
			dquot_decr_inodes(transfer_to[cnt], inode_usage);
			spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
			goto over_quota;
		}
	}

	/* Decrease usage for source structures and update quota pointers */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!is_valid[cnt])
			continue;
		/* Due to IO error we might not have transfer_from[] structure */
		if (transfer_from[cnt]) {
			int wtype;

			spin_lock(&transfer_from[cnt]->dq_dqb_lock);
			wtype = info_idq_free(transfer_from[cnt], inode_usage);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_inodes[cnt],
						transfer_from[cnt], wtype);
			wtype = info_bdq_free(transfer_from[cnt],
					      cur_space + rsv_space);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_space[cnt],
						transfer_from[cnt], wtype);
			dquot_decr_inodes(transfer_from[cnt], inode_usage);
			dquot_decr_space(transfer_from[cnt], cur_space);
			dquot_free_reserved_space(transfer_from[cnt],
						  rsv_space);
			spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
		}
		rcu_assign_pointer(dquots[cnt], transfer_to[cnt]);
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&dq_data_lock);

	/*
	 * These arrays are local and we hold dquot references so we don't need
	 * the srcu protection but still take dquot_srcu to avoid warning in
	 * mark_all_dquot_dirty().
	 */
	index = srcu_read_lock(&dquot_srcu);
	err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
	if (err < 0)
		ret = err;
	err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
	if (err < 0)
		ret = err;
	srcu_read_unlock(&dquot_srcu, index);

	flush_warnings(warn_to);
	flush_warnings(warn_from_inodes);
	flush_warnings(warn_from_space);
	/* Pass back references to put */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (is_valid[cnt])
			transfer_to[cnt] = transfer_from[cnt];
	return ret;
over_quota:
	/* Back out changes we already did */
	for (cnt--; cnt >= 0; cnt--) {
		if (!is_valid[cnt])
			continue;
		spin_lock(&transfer_to[cnt]->dq_dqb_lock);
		dquot_decr_inodes(transfer_to[cnt], inode_usage);
		dquot_decr_space(transfer_to[cnt], cur_space);
		dquot_free_reserved_space(transfer_to[cnt], rsv_space);
		spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&dq_data_lock);
	flush_warnings(warn_to);
	return ret;
}
EXPORT_SYMBOL(__dquot_transfer);
2146 
2147 /* Wrapper for transferring ownership of an inode for uid/gid only
2148  * Called from FSXXX_setattr()
2149  */
dquot_transfer(struct mnt_idmap * idmap,struct inode * inode,struct iattr * iattr)2150 int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode,
2151 		   struct iattr *iattr)
2152 {
2153 	struct dquot *transfer_to[MAXQUOTAS] = {};
2154 	struct dquot *dquot;
2155 	struct super_block *sb = inode->i_sb;
2156 	int ret;
2157 
2158 	if (!inode_quota_active(inode))
2159 		return 0;
2160 
2161 	if (i_uid_needs_update(idmap, iattr, inode)) {
2162 		kuid_t kuid = from_vfsuid(idmap, i_user_ns(inode),
2163 					  iattr->ia_vfsuid);
2164 
2165 		dquot = dqget(sb, make_kqid_uid(kuid));
2166 		if (IS_ERR(dquot)) {
2167 			if (PTR_ERR(dquot) != -ESRCH) {
2168 				ret = PTR_ERR(dquot);
2169 				goto out_put;
2170 			}
2171 			dquot = NULL;
2172 		}
2173 		transfer_to[USRQUOTA] = dquot;
2174 	}
2175 	if (i_gid_needs_update(idmap, iattr, inode)) {
2176 		kgid_t kgid = from_vfsgid(idmap, i_user_ns(inode),
2177 					  iattr->ia_vfsgid);
2178 
2179 		dquot = dqget(sb, make_kqid_gid(kgid));
2180 		if (IS_ERR(dquot)) {
2181 			if (PTR_ERR(dquot) != -ESRCH) {
2182 				ret = PTR_ERR(dquot);
2183 				goto out_put;
2184 			}
2185 			dquot = NULL;
2186 		}
2187 		transfer_to[GRPQUOTA] = dquot;
2188 	}
2189 	ret = __dquot_transfer(inode, transfer_to);
2190 out_put:
2191 	dqput_all(transfer_to);
2192 	return ret;
2193 }
2194 EXPORT_SYMBOL(dquot_transfer);
2195 
2196 /*
2197  * Write info of quota file to disk
2198  */
dquot_commit_info(struct super_block * sb,int type)2199 int dquot_commit_info(struct super_block *sb, int type)
2200 {
2201 	struct quota_info *dqopt = sb_dqopt(sb);
2202 
2203 	return dqopt->ops[type]->write_file_info(sb, type);
2204 }
2205 EXPORT_SYMBOL(dquot_commit_info);
2206 
dquot_get_next_id(struct super_block * sb,struct kqid * qid)2207 int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
2208 {
2209 	struct quota_info *dqopt = sb_dqopt(sb);
2210 
2211 	if (!sb_has_quota_active(sb, qid->type))
2212 		return -ESRCH;
2213 	if (!dqopt->ops[qid->type]->get_next_id)
2214 		return -ENOSYS;
2215 	return dqopt->ops[qid->type]->get_next_id(sb, qid);
2216 }
2217 EXPORT_SYMBOL(dquot_get_next_id);
2218 
/*
 * Definitions of diskquota operations.
 *
 * Default dquot_operations table used by filesystems that rely on the
 * generic VFS quota implementation in this file.
 */
const struct dquot_operations dquot_operations = {
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_next_id	= dquot_get_next_id,
};
EXPORT_SYMBOL(dquot_operations);
2233 
2234 /*
2235  * Generic helper for ->open on filesystems supporting disk quotas.
2236  */
dquot_file_open(struct inode * inode,struct file * file)2237 int dquot_file_open(struct inode *inode, struct file *file)
2238 {
2239 	int error;
2240 
2241 	error = generic_file_open(inode, file);
2242 	if (!error && (file->f_mode & FMODE_WRITE))
2243 		error = dquot_initialize(inode);
2244 	return error;
2245 }
2246 EXPORT_SYMBOL(dquot_file_open);
2247 
/*
 * Detach the quota file inode for @type from the superblock's quota
 * state: clear S_NOQUOTA (for external quota files only) and drop the
 * reference taken in vfs_setup_quota_inode().
 */
static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode = dqopt->files[type];

	if (!inode)
		return;
	/* Hidden system quota files never had S_NOQUOTA set */
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		inode_lock(inode);
		inode->i_flags &= ~S_NOQUOTA;
		inode_unlock(inode);
	}
	/* Clear the pointer before dropping the reference */
	dqopt->files[type] = NULL;
	iput(inode);
}
2263 
/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 *
 * @flags selects what to turn off (DQUOT_USAGE_ENABLED /
 * DQUOT_LIMITS_ENABLED) or whether to merely suspend (DQUOT_SUSPENDED).
 * When accounting actually stops for a type, all dquot references are
 * dropped, caches invalidated, and the format info written back.
 */
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
	int cnt;
	struct quota_info *dqopt = sb_dqopt(sb);

	rwsem_assert_held_write(&sb->s_umount);

	/* Cannot turn off usage accounting without turning off limits, or
	 * suspend quotas and simultaneously turn quotas off. */
	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
	    DQUOT_USAGE_ENABLED)))
		return -EINVAL;

	/*
	 * Skip everything if there's nothing to do. We have to do this because
	 * sometimes we are called when fill_super() failed and calling
	 * sync_fs() in such cases does no good.
	 */
	if (!sb_any_quota_loaded(sb))
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_loaded(sb, cnt))
			continue;

		if (flags & DQUOT_SUSPENDED) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_SUSPENDED, cnt);
			spin_unlock(&dq_state_lock);
		} else {
			spin_lock(&dq_state_lock);
			dqopt->flags &= ~dquot_state_flag(flags, cnt);
			/* Turning off suspended quotas? */
			if (!sb_has_quota_loaded(sb, cnt) &&
			    sb_has_quota_suspended(sb, cnt)) {
				dqopt->flags &=	~dquot_state_flag(
							DQUOT_SUSPENDED, cnt);
				spin_unlock(&dq_state_lock);
				/* Suspended state held the inode reference */
				vfs_cleanup_quota_inode(sb, cnt);
				continue;
			}
			spin_unlock(&dq_state_lock);
		}

		/* We still have to keep quota loaded? */
		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
			continue;

		/* Note: these are blocking operations */
		drop_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we
		 * should be only users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}

	/* Skip syncing and setting flags if quota files are hidden */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		goto put_inodes;

	/* Sync the superblock so that buffers with quota data are written to
	 * disk (and so userspace sees correct data afterwards). */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now the quota files are just ordinary files and we can set the
	 * inode flags back. Moreover we discard the pagecache so that
	 * userspace sees the writes we did bypassing the pagecache. We
	 * must also discard the blockdev buffers so that we see the
	 * changes done by userspace on the next quotaon() */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
			inode_lock(dqopt->files[cnt]);
			truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
			inode_unlock(dqopt->files[cnt]);
		}
	if (sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
put_inodes:
	/* We are done when suspending quotas */
	if (flags & DQUOT_SUSPENDED)
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt))
			vfs_cleanup_quota_inode(sb, cnt);
	return 0;
}
EXPORT_SYMBOL(dquot_disable);
2370 
dquot_quota_off(struct super_block * sb,int type)2371 int dquot_quota_off(struct super_block *sb, int type)
2372 {
2373 	return dquot_disable(sb, type,
2374 			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2375 }
2376 EXPORT_SYMBOL(dquot_quota_off);
2377 
/*
 *	Turn quotas on on a device
 */

/*
 * Validate @inode as a quota file for @type and pin it in
 * dqopt->files[type]. For external (non-hidden) quota files also set
 * S_NOQUOTA and drop any dquot references the inode already holds.
 * Undone by vfs_cleanup_quota_inode().
 */
static int vfs_setup_quota_inode(struct inode *inode, int type)
{
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (is_bad_inode(inode))
		return -EUCLEAN;
	if (!S_ISREG(inode->i_mode))
		return -EACCES;
	if (IS_RDONLY(inode))
		return -EROFS;
	if (sb_has_quota_loaded(sb, type))
		return -EBUSY;

	/*
	 * Quota files should never be encrypted.  They should be thought of as
	 * filesystem metadata, not user data.  New-style internal quota files
	 * cannot be encrypted by users anyway, but old-style external quota
	 * files could potentially be incorrectly created in an encrypted
	 * directory, hence this explicit check.  Some reasons why encrypted
	 * quota files don't work include: (1) some filesystems that support
	 * encryption don't handle it in their quota_read and quota_write, and
	 * (2) cleaning up encrypted quota files at unmount would need special
	 * consideration, as quota files are cleaned up later than user files.
	 */
	if (IS_ENCRYPTED(inode))
		return -EINVAL;

	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		return -EIO;
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible) Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
		/*
		 * When S_NOQUOTA is set, remove dquot references as no more
		 * references can be added
		 */
		__dquot_drop(inode);
	}
	return 0;
}
2428 
/*
 * Load quota of format @format_id for @type on @sb with the given state
 * @flags (DQUOT_USAGE_ENABLED and optionally DQUOT_LIMITS_ENABLED).
 * The quota inode must already be set up (vfs_setup_quota_inode()).
 * Returns 0 on success, -errno on failure; on failure quota is left
 * fully disabled for @type.
 */
int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
	unsigned int flags)
{
	struct quota_format_type *fmt;
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;

	lockdep_assert_held_write(&sb->s_umount);

	/* Just unsuspend quotas? */
	if (WARN_ON_ONCE(flags & DQUOT_SUSPENDED))
		return -EINVAL;

	fmt = find_quota_format(format_id);
	if (!fmt)
		return -ESRCH;
	if (!sb->dq_op || !sb->s_qcop ||
	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Filesystems outside of init_user_ns not yet supported */
	if (sb->s_user_ns != &init_user_ns) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_fmt;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that kernel sees
		 * changes from userspace. It is not enough to just flush
		 * the quota file since if blocksize < pagesize, invalidation
		 * of the cache could fail because of other unrelated dirty
		 * data */
		sync_filesystem(sb);
		invalidate_bdev(sb->s_bdev);
	}

	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_fmt;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0)
		goto out_fmt;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
		spin_lock(&dq_data_lock);
		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
		spin_unlock(&dq_data_lock);
	}
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	/* Attach dquots to all inodes currently having pages or dirty state */
	error = add_dquot_ref(sb, type);
	if (error)
		dquot_disable(sb, type,
			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	return error;
out_fmt:
	put_quota_format(fmt);

	return error;
}
EXPORT_SYMBOL(dquot_load_quota_sb);
2508 
2509 /*
2510  * More powerful function for turning on quotas on given quota inode allowing
2511  * setting of individual quota flags
2512  */
dquot_load_quota_inode(struct inode * inode,int type,int format_id,unsigned int flags)2513 int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
2514 	unsigned int flags)
2515 {
2516 	int err;
2517 
2518 	err = vfs_setup_quota_inode(inode, type);
2519 	if (err < 0)
2520 		return err;
2521 	err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
2522 	if (err < 0)
2523 		vfs_cleanup_quota_inode(inode->i_sb, type);
2524 	return err;
2525 }
2526 EXPORT_SYMBOL(dquot_load_quota_inode);
2527 
/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int ret = 0, cnt;
	unsigned int flags;

	rwsem_assert_held_write(&sb->s_umount);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_suspended(sb, cnt))
			continue;

		/*
		 * Capture which of usage/limits were enabled before the
		 * suspend, then clear all state flags for this type.
		 */
		spin_lock(&dq_state_lock);
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);

		/* Convert per-type flag bits back to generic DQUOT_* flags */
		flags = dquot_generic_flag(flags, cnt);
		ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
					  flags);
		if (ret < 0)
			vfs_cleanup_quota_inode(sb, cnt);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);
2560 
dquot_quota_on(struct super_block * sb,int type,int format_id,const struct path * path)2561 int dquot_quota_on(struct super_block *sb, int type, int format_id,
2562 		   const struct path *path)
2563 {
2564 	int error = security_quota_on(path->dentry);
2565 	if (error)
2566 		return error;
2567 	/* Quota file not on the same filesystem? */
2568 	if (path->dentry->d_sb != sb)
2569 		error = -EXDEV;
2570 	else
2571 		error = dquot_load_quota_inode(d_inode(path->dentry), type,
2572 					     format_id, DQUOT_USAGE_ENABLED |
2573 					     DQUOT_LIMITS_ENABLED);
2574 	return error;
2575 }
2576 EXPORT_SYMBOL(dquot_quota_on);
2577 
2578 /*
2579  * This function is used when filesystem needs to initialize quotas
2580  * during mount time.
2581  */
dquot_quota_on_mount(struct super_block * sb,char * qf_name,int format_id,int type)2582 int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
2583 		int format_id, int type)
2584 {
2585 	struct dentry *dentry;
2586 	int error;
2587 
2588 	dentry = lookup_noperm_positive_unlocked(&QSTR(qf_name), sb->s_root);
2589 	if (IS_ERR(dentry))
2590 		return PTR_ERR(dentry);
2591 
2592 	error = security_quota_on(dentry);
2593 	if (!error)
2594 		error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
2595 				DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2596 
2597 	dput(dentry);
2598 	return error;
2599 }
2600 EXPORT_SYMBOL(dquot_quota_on_mount);
2601 
/*
 * Turn on limit enforcement for the types selected by the XFS-style
 * enforcement bits in @flags. Only valid for filesystems with hidden
 * system quota files; accounting itself cannot be enabled here.
 * On failure, enforcement enabled earlier in the loop is rolled back.
 */
static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/* Accounting cannot be turned on while fs is mounted */
	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
	if (!flags)
		return -EINVAL;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!(flags & qtype_enforce_flag(type)))
			continue;
		/* Can't enforce without accounting */
		if (!sb_has_quota_usage_enabled(sb, type)) {
			ret = -EINVAL;
			goto out_err;
		}
		if (sb_has_quota_limits_enabled(sb, type)) {
			/* compatible with XFS */
			ret = -EEXIST;
			goto out_err;
		}
		spin_lock(&dq_state_lock);
		dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
		spin_unlock(&dq_state_lock);
	}
	return 0;
out_err:
	/* Backout enforcement enablement we already did */
	for (type--; type >= 0; type--)  {
		if (flags & qtype_enforce_flag(type))
			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
	}
	return ret;
}
2640 
/*
 * Turn off limit enforcement for the types selected by the XFS-style
 * enforcement bits in @flags. Accounting cannot be turned off this way.
 * On failure, enforcement disabled earlier in the loop is re-enabled.
 */
static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/*
	 * We don't support turning off accounting via quotactl. In principle
	 * quota infrastructure can do this but filesystems don't expect
	 * userspace to be able to do it.
	 */
	if (flags &
		  (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
		return -EOPNOTSUPP;

	/* Filter out limits not enabled */
	for (type = 0; type < MAXQUOTAS; type++)
		if (!sb_has_quota_limits_enabled(sb, type))
			flags &= ~qtype_enforce_flag(type);
	/* Nothing left? */
	if (!flags)
		return -EEXIST;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (flags & qtype_enforce_flag(type)) {
			ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
			if (ret < 0)
				goto out_err;
		}
	}
	return 0;
out_err:
	/* Backout enforcement disabling we already did */
	for (type--; type >= 0; type--)  {
		if (flags & qtype_enforce_flag(type)) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
			spin_unlock(&dq_state_lock);
		}
	}
	return ret;
}
2685 
2686 /* Generic routine for getting common part of quota structure */
do_get_dqblk(struct dquot * dquot,struct qc_dqblk * di)2687 static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
2688 {
2689 	struct mem_dqblk *dm = &dquot->dq_dqb;
2690 
2691 	memset(di, 0, sizeof(*di));
2692 	spin_lock(&dquot->dq_dqb_lock);
2693 	di->d_spc_hardlimit = dm->dqb_bhardlimit;
2694 	di->d_spc_softlimit = dm->dqb_bsoftlimit;
2695 	di->d_ino_hardlimit = dm->dqb_ihardlimit;
2696 	di->d_ino_softlimit = dm->dqb_isoftlimit;
2697 	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
2698 	di->d_ino_count = dm->dqb_curinodes;
2699 	di->d_spc_timer = dm->dqb_btime;
2700 	di->d_ino_timer = dm->dqb_itime;
2701 	spin_unlock(&dquot->dq_dqb_lock);
2702 }
2703 
/* Fetch the quota usage/limits for id @qid into @di */
int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot = dqget(sb, qid);

	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);
	return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);
2718 
dquot_get_next_dqblk(struct super_block * sb,struct kqid * qid,struct qc_dqblk * di)2719 int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
2720 			 struct qc_dqblk *di)
2721 {
2722 	struct dquot *dquot;
2723 	int err;
2724 
2725 	if (!sb->dq_op->get_next_id)
2726 		return -ENOSYS;
2727 	err = sb->dq_op->get_next_id(sb, qid);
2728 	if (err < 0)
2729 		return err;
2730 	dquot = dqget(sb, *qid);
2731 	if (IS_ERR(dquot))
2732 		return PTR_ERR(dquot);
2733 	do_get_dqblk(dquot, di);
2734 	dqput(dquot);
2735 
2736 	return 0;
2737 }
2738 EXPORT_SYMBOL(dquot_get_next_dqblk);
2739 
/* qc_dqblk fields that may be set through the VFS quota interface */
#define VFS_QC_MASK \
	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
	 QC_SPC_TIMER | QC_INO_TIMER)
2744 
/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
	int ret;

	/* Reject fields the VFS interface does not allow setting */
	if (di->d_fieldmask & ~VFS_QC_MASK)
		return -EINVAL;

	/* Limits must fit within what the quota format can store on disk */
	if (((di->d_fieldmask & QC_SPC_SOFT) &&
	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_SPC_HARD) &&
	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_INO_SOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
	    ((di->d_fieldmask & QC_INO_HARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
		return -ERANGE;

	spin_lock(&dquot->dq_dqb_lock);
	if (di->d_fieldmask & QC_SPACE) {
		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_SOFT)
		dm->dqb_bsoftlimit = di->d_spc_softlimit;
	if (di->d_fieldmask & QC_SPC_HARD)
		dm->dqb_bhardlimit = di->d_spc_hardlimit;
	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_COUNT) {
		dm->dqb_curinodes = di->d_ino_count;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_SOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & QC_INO_HARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_TIMER) {
		dm->dqb_btime = di->d_spc_timer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_TIMER) {
		dm->dqb_itime = di->d_ino_timer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	/* Recompute grace time / warning state if space usage or limits moved */
	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
	}
	/* Same recomputation for the inode soft limit */
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes <= dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_INO_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
	}
	/* A dquot with no limits at all is "fake" - exists for usage only */
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	ret = mark_dquot_dirty(dquot);
	if (ret < 0)
		return ret;
	return 0;
}
2838 
/* Apply the fields selected in di->d_fieldmask to the dquot of id @qid */
int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
		  struct qc_dqblk *di)
{
	struct dquot *dquot = dqget(sb, qid);
	int rc;

	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	rc = do_set_dqblk(dquot, di);
	dqput(dquot);
	return rc;
}
EXPORT_SYMBOL(dquot_set_dqblk);
2856 
2857 /* Generic routine for getting common part of quota file information */
dquot_get_state(struct super_block * sb,struct qc_state * state)2858 int dquot_get_state(struct super_block *sb, struct qc_state *state)
2859 {
2860 	struct mem_dqinfo *mi;
2861 	struct qc_type_state *tstate;
2862 	struct quota_info *dqopt = sb_dqopt(sb);
2863 	int type;
2864 
2865 	memset(state, 0, sizeof(*state));
2866 	for (type = 0; type < MAXQUOTAS; type++) {
2867 		if (!sb_has_quota_active(sb, type))
2868 			continue;
2869 		tstate = state->s_state + type;
2870 		mi = sb_dqopt(sb)->info + type;
2871 		tstate->flags = QCI_ACCT_ENABLED;
2872 		spin_lock(&dq_data_lock);
2873 		if (mi->dqi_flags & DQF_SYS_FILE)
2874 			tstate->flags |= QCI_SYSFILE;
2875 		if (mi->dqi_flags & DQF_ROOT_SQUASH)
2876 			tstate->flags |= QCI_ROOT_SQUASH;
2877 		if (sb_has_quota_limits_enabled(sb, type))
2878 			tstate->flags |= QCI_LIMITS_ENFORCED;
2879 		tstate->spc_timelimit = mi->dqi_bgrace;
2880 		tstate->ino_timelimit = mi->dqi_igrace;
2881 		if (dqopt->files[type]) {
2882 			tstate->ino = dqopt->files[type]->i_ino;
2883 			tstate->blocks = dqopt->files[type]->i_blocks;
2884 		}
2885 		tstate->nextents = 1;	/* We don't know... */
2886 		spin_unlock(&dq_data_lock);
2887 	}
2888 	return 0;
2889 }
2890 EXPORT_SYMBOL(dquot_get_state);
2891 
2892 /* Generic routine for setting common part of quota file information */
dquot_set_dqinfo(struct super_block * sb,int type,struct qc_info * ii)2893 int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
2894 {
2895 	struct mem_dqinfo *mi;
2896 
2897 	if ((ii->i_fieldmask & QC_WARNS_MASK) ||
2898 	    (ii->i_fieldmask & QC_RT_SPC_TIMER))
2899 		return -EINVAL;
2900 	if (!sb_has_quota_active(sb, type))
2901 		return -ESRCH;
2902 	mi = sb_dqopt(sb)->info + type;
2903 	if (ii->i_fieldmask & QC_FLAGS) {
2904 		if ((ii->i_flags & QCI_ROOT_SQUASH &&
2905 		     mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
2906 			return -EINVAL;
2907 	}
2908 	spin_lock(&dq_data_lock);
2909 	if (ii->i_fieldmask & QC_SPC_TIMER)
2910 		mi->dqi_bgrace = ii->i_spc_timelimit;
2911 	if (ii->i_fieldmask & QC_INO_TIMER)
2912 		mi->dqi_igrace = ii->i_ino_timelimit;
2913 	if (ii->i_fieldmask & QC_FLAGS) {
2914 		if (ii->i_flags & QCI_ROOT_SQUASH)
2915 			mi->dqi_flags |= DQF_ROOT_SQUASH;
2916 		else
2917 			mi->dqi_flags &= ~DQF_ROOT_SQUASH;
2918 	}
2919 	spin_unlock(&dq_data_lock);
2920 	mark_info_dirty(sb, type);
2921 	/* Force write to disk */
2922 	return sb->dq_op->write_info(sb, type);
2923 }
2924 EXPORT_SYMBOL(dquot_set_dqinfo);
2925 
/*
 * quotactl operations for filesystems that keep quota information in a
 * hidden system file (enable/disable by flags rather than by path).
 * All entries point at the generic dquot implementations in this file.
 */
const struct quotactl_ops dquot_quotactl_sysfile_ops = {
	.quota_enable	= dquot_quota_enable,
	.quota_disable	= dquot_quota_disable,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
2937 
/*
 * Sysctl handler for the /proc/sys/fs/quota/ counters: refresh the
 * plain-integer mirror in dqstats.stat[] from the percpu counter and
 * let proc_doulongvec_minmax() present it.
 */
static int do_proc_dqstats(const struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos)
{
	/* Recover the counter index from the table entry's ->data pointer. */
	unsigned int type = (unsigned long *)table->data - dqstats.stat;
	s64 sum = percpu_counter_sum(&dqstats.counter[type]);

	/*
	 * These counters are also decremented, so a percpu sum may be
	 * transiently negative; clamp such values to zero.
	 */
	switch (type) {
	case DQST_ALLOC_DQUOTS:
	case DQST_FREE_DQUOTS:
		if (sum < 0)
			sum = 0;
		break;
	default:
		break;
	}

	/* Update global table */
	dqstats.stat[type] = sum;
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
2953 
/*
 * /proc/sys/fs/quota/ entries: one read-only unsigned long per dqstats
 * counter, served by do_proc_dqstats(), plus the writable "warnings"
 * toggle when console quota warnings are compiled in.
 */
static const struct ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.stat[DQST_LOOKUPS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.stat[DQST_DROPS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.stat[DQST_READS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.stat[DQST_WRITES],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.stat[DQST_CACHE_HITS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.stat[DQST_SYNCS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
};
3021 
/*
 * Boot-time initialization of the quota subsystem: sysctl entries, the
 * dquot slab cache, the dquot hash table, percpu statistics counters,
 * the dqcache shrinker and the unbound work queue for quota events.
 * Any failure here panics, since quota cannot operate without these
 * resources (the slab cache itself uses SLAB_PANIC).
 */
static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;
	struct shrinker *dqcache_shrinker;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_init("fs/quota", fs_dqstats_table);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_PANIC),
			NULL);

	/* order 0: the hash table occupies a single page */
	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	ret = percpu_counter_init_many(dqstats.counter, 0, GFP_KERNEL,
				       _DQST_DQSTAT_LAST);
	if (ret)
		panic("Cannot create dquot stat counters");

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = ilog2(nr_hash);

	/* Round down to the largest power of two and derive the hash mask */
	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));

	/* Register the shrinker so the dquot cache shrinks under memory
	   pressure; registered only after everything it scans exists. */
	dqcache_shrinker = shrinker_alloc(0, "dquota-cache");
	if (!dqcache_shrinker)
		panic("Cannot allocate dquot shrinker");

	dqcache_shrinker->count_objects = dqcache_shrink_count;
	dqcache_shrinker->scan_objects = dqcache_shrink_scan;

	shrinker_register(dqcache_shrinker);

	/* WQ_MEM_RECLAIM: quota writeback may be needed to make progress
	   under memory pressure */
	quota_unbound_wq = alloc_workqueue("quota_events_unbound",
					   WQ_UNBOUND | WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
	if (!quota_unbound_wq)
		panic("Cannot create quota_unbound_wq\n");

	return 0;
}
fs_initcall(dquot_init);
3077