xref: /linux/fs/sync.c (revision 4c728ef583b3d82266584da5cb068294c09df31e)
/*
 * High-level sync()-related operations
 */

#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>

#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
			SYNC_FILE_RANGE_WAIT_AFTER)

/*
 * sync everything.  Start out by waking pdflush, because that writes back
 * all queues in parallel.
 */
static void do_sync(unsigned long wait)
{
	wakeup_pdflush(0);
	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
	DQUOT_SYNC(NULL);
	sync_supers();		/* Write the superblocks */
	sync_filesystems(0);	/* Start syncing the filesystems */
	sync_filesystems(wait);	/* Sync the filesystems, waiting if asked to */
	sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
	if (!wait)
		printk("Emergency Sync complete\n");
	if (unlikely(laptop_mode))
		laptop_sync_completion();
}

asmlinkage long sys_sync(void)
{
	do_sync(1);
	return 0;
}

void emergency_sync(void)
{
	pdflush_operation(do_sync, 0);
}

/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct super_block *sb;
	int ret, err;

	/* sync the inode to buffers */
	ret = write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	lock_super(sb);
	if (sb->s_dirt && sb->s_op->write_super)
		sb->s_op->write_super(sb);
	unlock_super(sb);

	/* .. finally sync the buffers to disk */
	err = sync_blockdev(sb->s_bdev);
	if (!ret)
		ret = err;
	return ret;
}
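
/*
 * Illustrative sketch, not part of the original file: simple disk
 * filesystems of this era commonly pointed their ->fsync hook straight
 * at file_fsync() and let this generic helper write back the inode, the
 * superblock and the underlying blockdev.  The name "example_fops" is
 * hypothetical and used only for illustration.
 */
#if 0	/* illustration only, never compiled */
static const struct file_operations example_fops = {
	.fsync	= file_fsync,
};
#endif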

/**
 * vfs_fsync - perform a fsync or fdatasync on a file
 * @file:		file to sync
 * @dentry:		dentry of @file
 * @datasync:		only perform a fdatasync operation
 *
 * Write back data and metadata for @file to disk.  If @datasync is
 * set, only the metadata needed to access the modified file data is
 * written.
 *
 * In case this function is called from nfsd, @file may be %NULL and
 * only @dentry is set.  This can only happen when the filesystem
 * implements the export_operations API.
 */
int vfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	const struct file_operations *fop;
	struct address_space *mapping;
	int err, ret;

	/*
	 * Get the mapping and operations from the file if we have one,
	 * or fall back to the inode's defaults if no struct file is
	 * available.  Damn nfsd..
	 */
	if (file) {
		mapping = file->f_mapping;
		fop = file->f_op;
	} else {
		mapping = dentry->d_inode->i_mapping;
		fop = dentry->d_inode->i_fop;
	}

	if (!fop || !fop->fsync) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_fdatawrite(mapping);

	/*
	 * We need to protect against concurrent writers, which could cause
	 * livelocks in fsync_buffers_list().
	 */
	mutex_lock(&mapping->host->i_mutex);
	err = fop->fsync(file, dentry, datasync);
	if (!ret)
		ret = err;
	mutex_unlock(&mapping->host->i_mutex);
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
out:
	return ret;
}
EXPORT_SYMBOL(vfs_fsync);
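
/*
 * Illustrative sketch, not part of the original file: an in-kernel caller
 * that only holds a dentry (the nfsd case described in the kerneldoc above)
 * could flush it like this.  "example_flush_dentry" is a hypothetical
 * helper name.
 */
#if 0	/* illustration only, never compiled */
static int example_flush_dentry(struct dentry *dentry)
{
	/*
	 * No struct file is available, so vfs_fsync() falls back to the
	 * inode's i_mapping and i_fop, as documented above.
	 */
	return vfs_fsync(NULL, dentry, 0);
}
#endif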

static int do_fsync(unsigned int fd, int datasync)
{
	struct file *file;
	int ret = -EBADF;

	file = fget(fd);
	if (file) {
		ret = vfs_fsync(file, file->f_path.dentry, datasync);
		fput(file);
	}
	return ret;
}

asmlinkage long sys_fsync(unsigned int fd)
{
	return do_fsync(fd, 0);
}

asmlinkage long sys_fdatasync(unsigned int fd)
{
	return do_fsync(fd, 1);
}

/*
 * sys_sync_file_range() permits finely controlled syncing over a segment of
 * a file in the range offset .. (offset+nbytes-1) inclusive.  If nbytes is
 * zero then sys_sync_file_range() will operate from offset out to EOF.
 *
 * The flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
 * before performing the write.
 *
 * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
 * range which are not presently under writeback. Note that this may block for
 * significant periods due to exhaustion of disk request structures.
 *
 * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
 * after performing the write.
 *
 * Useful combinations of the flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
 * in the range which were dirty on entry to sys_sync_file_range() are placed
 * under writeout.  This is a start-write-for-data-integrity operation.
 *
 * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
 * are not presently under writeout.  This is an asynchronous flush-to-disk
 * operation.  Not suitable for data integrity operations.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
 * completion of writeout of all pages in the range.  This will be used after
 * an earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to
 * wait for that operation to complete and to return the result.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER:
 * a traditional sync() operation.  This is a write-for-data-integrity
 * operation which will ensure that all pages in the range which were dirty on
 * entry to sys_sync_file_range() are committed to disk.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
 * I/O errors or ENOSPC conditions and will return those to the caller, after
 * clearing the EIO and ENOSPC flags in the address_space.
 *
 * It should be noted that none of these operations write out the file's
 * metadata.  So unless the application is strictly performing overwrites of
 * already-instantiated disk blocks, there are no guarantees here that the data
 * will be available after a crash.
 */
asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
					unsigned int flags)
{
	int ret;
	struct file *file;
	loff_t endbyte;			/* inclusive */
	int fput_needed;
	umode_t i_mode;

	ret = -EINVAL;
	if (flags & ~VALID_FLAGS)
		goto out;

	endbyte = offset + nbytes;

	if ((s64)offset < 0)
		goto out;
	if ((s64)endbyte < 0)
		goto out;
	if (endbyte < offset)
		goto out;

	if (sizeof(pgoff_t) == 4) {
		if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * The range starts outside a 32 bit machine's
			 * pagecache addressing capabilities.  Let it "succeed"
			 */
			ret = 0;
			goto out;
		}
		if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * Out to EOF
			 */
			nbytes = 0;
		}
	}

	if (nbytes == 0)
		endbyte = LLONG_MAX;
	else
		endbyte--;		/* inclusive */

	ret = -EBADF;
	file = fget_light(fd, &fput_needed);
	if (!file)
		goto out;

	i_mode = file->f_path.dentry->d_inode->i_mode;
	ret = -ESPIPE;
	if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
			!S_ISLNK(i_mode))
		goto out_put;

	ret = do_sync_mapping_range(file->f_mapping, offset, endbyte, flags);
out_put:
	fput_light(file, fput_needed);
out:
	return ret;
}
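
/*
 * Illustrative sketch, not part of the original file: the
 * write-for-data-integrity flag combination documented above, issued from
 * userspace over the first megabyte of an already-instantiated file.  This
 * assumes a glibc that exposes the sync_file_range() wrapper (declared in
 * <fcntl.h> under _GNU_SOURCE); older C libraries would need syscall(2).
 */
#if 0	/* userspace example, never compiled here */
#define _GNU_SOURCE
#include <fcntl.h>

int flush_first_megabyte(int fd)
{
	return sync_file_range(fd, 0, 1024 * 1024,
			       SYNC_FILE_RANGE_WAIT_BEFORE |
			       SYNC_FILE_RANGE_WRITE |
			       SYNC_FILE_RANGE_WAIT_AFTER);
}
#endif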

/* It would be nice if people remembered that not all the world's an i386
   when they introduce new system calls */
asmlinkage long sys_sync_file_range2(int fd, unsigned int flags,
				     loff_t offset, loff_t nbytes)
{
	return sys_sync_file_range(fd, offset, nbytes, flags);
}

/*
 * `endbyte' is inclusive
 */
int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
			  loff_t endbyte, unsigned int flags)
{
	int ret;

	if (!mapping) {
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
	if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
		ret = wait_on_page_writeback_range(mapping,
					offset >> PAGE_CACHE_SHIFT,
					endbyte >> PAGE_CACHE_SHIFT);
		if (ret < 0)
			goto out;
	}

	if (flags & SYNC_FILE_RANGE_WRITE) {
		ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
						WB_SYNC_NONE);
		if (ret < 0)
			goto out;
	}

	if (flags & SYNC_FILE_RANGE_WAIT_AFTER) {
		ret = wait_on_page_writeback_range(mapping,
					offset >> PAGE_CACHE_SHIFT,
					endbyte >> PAGE_CACHE_SHIFT);
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(do_sync_mapping_range);
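
/*
 * Illustrative sketch, not part of the original file: an in-kernel caller
 * flushing `nbytes' starting at `pos' must convert the length into the
 * inclusive `endbyte' this helper expects, exactly as sys_sync_file_range()
 * does above.  "example_sync_range" is a hypothetical helper name.
 */
#if 0	/* illustration only, never compiled */
static int example_sync_range(struct address_space *mapping, loff_t pos,
			      loff_t nbytes)
{
	return do_sync_mapping_range(mapping, pos, pos + nbytes - 1,
				     SYNC_FILE_RANGE_WAIT_BEFORE |
				     SYNC_FILE_RANGE_WRITE |
				     SYNC_FILE_RANGE_WAIT_AFTER);
}
#endif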