1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  *  Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20 
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48 
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52 	bh->b_end_io = handler;
53 	bh->b_private = private;
54 }
55 EXPORT_SYMBOL(init_buffer);
56 
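/*
 * Action function passed to wait_on_bit()/wait_on_bit_lock() below: just
 * let the I/O scheduler put the task to sleep until the bit waitqueue is
 * woken.
 */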
57 static int sleep_on_buffer(void *word)
58 {
59 	io_schedule();
60 	return 0;
61 }
62 
63 void __lock_buffer(struct buffer_head *bh)
64 {
65 	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
66 							TASK_UNINTERRUPTIBLE);
67 }
68 EXPORT_SYMBOL(__lock_buffer);
69 
70 void unlock_buffer(struct buffer_head *bh)
71 {
72 	clear_bit_unlock(BH_Lock, &bh->b_state);
73 	smp_mb__after_clear_bit();
74 	wake_up_bit(&bh->b_state, BH_Lock);
75 }
76 EXPORT_SYMBOL(unlock_buffer);
77 
78 /*
79  * Block until a buffer comes unlocked.  This doesn't stop it
80  * from becoming locked again - you have to lock it yourself
81  * if you want to preserve its state.
82  */
83 void __wait_on_buffer(struct buffer_head * bh)
84 {
85 	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
86 }
87 EXPORT_SYMBOL(__wait_on_buffer);
88 
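/*
 * Detach the buffer list from a page: clear PG_private, zero page->private
 * and drop the page reference which PG_private was holding.
 */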
89 static void
90 __clear_page_buffers(struct page *page)
91 {
92 	ClearPagePrivate(page);
93 	set_page_private(page, 0);
94 	page_cache_release(page);
95 }
96 
97 
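/*
 * Returns non-zero when the I/O error message for this buffer should be
 * suppressed, either because BH_Quiet is set or because printk is being
 * rate-limited.
 */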
98 static int quiet_error(struct buffer_head *bh)
99 {
100 	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
101 		return 0;
102 	return 1;
103 }
104 
105 
106 static void buffer_io_error(struct buffer_head *bh)
107 {
108 	char b[BDEVNAME_SIZE];
109 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
110 			bdevname(bh->b_bdev, b),
111 			(unsigned long long)bh->b_blocknr);
112 }
113 
114 /*
115  * End-of-IO handler helper function which does not touch the bh after
116  * unlocking it.
117  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
118  * a race there is benign: unlock_buffer() only uses the bh's address for
119  * hashing after unlocking the buffer, so it doesn't actually touch the bh
120  * itself.
121  */
122 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
123 {
124 	if (uptodate) {
125 		set_buffer_uptodate(bh);
126 	} else {
127 		/* This happens, due to failed READA attempts. */
128 		clear_buffer_uptodate(bh);
129 	}
130 	unlock_buffer(bh);
131 }
132 
133 /*
134  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
135  * unlock the buffer. This is what ll_rw_block uses too.
136  */
137 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
138 {
139 	__end_buffer_read_notouch(bh, uptodate);
140 	put_bh(bh);
141 }
142 EXPORT_SYMBOL(end_buffer_read_sync);
143 
144 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
145 {
146 	char b[BDEVNAME_SIZE];
147 
148 	if (uptodate) {
149 		set_buffer_uptodate(bh);
150 	} else {
151 		if (!quiet_error(bh)) {
152 			buffer_io_error(bh);
153 			printk(KERN_WARNING "lost page write due to "
154 					"I/O error on %s\n",
155 				       bdevname(bh->b_bdev, b));
156 		}
157 		set_buffer_write_io_error(bh);
158 		clear_buffer_uptodate(bh);
159 	}
160 	unlock_buffer(bh);
161 	put_bh(bh);
162 }
163 EXPORT_SYMBOL(end_buffer_write_sync);
164 
165 /*
166  * Various filesystems appear to want __find_get_block to be non-blocking.
167  * But it's the page lock which protects the buffers.  To get around this,
168  * we get exclusion from try_to_free_buffers with the blockdev mapping's
169  * private_lock.
170  *
171  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
172  * may be quite high.  This code could TryLock the page, and if that
173  * succeeds, there is no need to take private_lock. (But if
174  * private_lock is contended then so is mapping->tree_lock).
175  */
176 static struct buffer_head *
177 __find_get_block_slow(struct block_device *bdev, sector_t block)
178 {
179 	struct inode *bd_inode = bdev->bd_inode;
180 	struct address_space *bd_mapping = bd_inode->i_mapping;
181 	struct buffer_head *ret = NULL;
182 	pgoff_t index;
183 	struct buffer_head *bh;
184 	struct buffer_head *head;
185 	struct page *page;
186 	int all_mapped = 1;
187 
188 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
189 	page = find_get_page(bd_mapping, index);
190 	if (!page)
191 		goto out;
192 
193 	spin_lock(&bd_mapping->private_lock);
194 	if (!page_has_buffers(page))
195 		goto out_unlock;
196 	head = page_buffers(page);
197 	bh = head;
198 	do {
199 		if (!buffer_mapped(bh))
200 			all_mapped = 0;
201 		else if (bh->b_blocknr == block) {
202 			ret = bh;
203 			get_bh(bh);
204 			goto out_unlock;
205 		}
206 		bh = bh->b_this_page;
207 	} while (bh != head);
208 
209 	/* we might be here because some of the buffers on this page are
210 	 * not mapped.  This is due to various races between
211 	 * file io on the block device and getblk.  It gets dealt with
212 	 * elsewhere, don't buffer_error if we had some unmapped buffers
213 	 */
214 	if (all_mapped) {
215 		char b[BDEVNAME_SIZE];
216 
217 		printk("__find_get_block_slow() failed. "
218 			"block=%llu, b_blocknr=%llu\n",
219 			(unsigned long long)block,
220 			(unsigned long long)bh->b_blocknr);
221 		printk("b_state=0x%08lx, b_size=%zu\n",
222 			bh->b_state, bh->b_size);
223 		printk("device %s blocksize: %d\n", bdevname(bdev, b),
224 			1 << bd_inode->i_blkbits);
225 	}
226 out_unlock:
227 	spin_unlock(&bd_mapping->private_lock);
228 	page_cache_release(page);
229 out:
230 	return ret;
231 }
232 
233 /*
234  * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
235  */
236 static void free_more_memory(void)
237 {
238 	struct zone *zone;
239 	int nid;
240 
241 	wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
242 	yield();
243 
244 	for_each_online_node(nid) {
245 		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
246 						gfp_zone(GFP_NOFS), NULL,
247 						&zone);
248 		if (zone)
249 			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
250 						GFP_NOFS, NULL);
251 	}
252 }
253 
254 /*
255  * I/O completion handler for block_read_full_page() - pages
256  * which come unlocked at the end of I/O.
257  */
258 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
259 {
260 	unsigned long flags;
261 	struct buffer_head *first;
262 	struct buffer_head *tmp;
263 	struct page *page;
264 	int page_uptodate = 1;
265 
266 	BUG_ON(!buffer_async_read(bh));
267 
268 	page = bh->b_page;
269 	if (uptodate) {
270 		set_buffer_uptodate(bh);
271 	} else {
272 		clear_buffer_uptodate(bh);
273 		if (!quiet_error(bh))
274 			buffer_io_error(bh);
275 		SetPageError(page);
276 	}
277 
278 	/*
279 	 * Be _very_ careful from here on. Bad things can happen if
280 	 * two buffer heads end IO at almost the same time and both
281 	 * decide that the page is now completely done.
282 	 */
283 	first = page_buffers(page);
284 	local_irq_save(flags);
285 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
286 	clear_buffer_async_read(bh);
287 	unlock_buffer(bh);
288 	tmp = bh;
289 	do {
290 		if (!buffer_uptodate(tmp))
291 			page_uptodate = 0;
292 		if (buffer_async_read(tmp)) {
293 			BUG_ON(!buffer_locked(tmp));
294 			goto still_busy;
295 		}
296 		tmp = tmp->b_this_page;
297 	} while (tmp != bh);
298 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
299 	local_irq_restore(flags);
300 
301 	/*
302 	 * If none of the buffers had errors and they are all
303 	 * uptodate then we can set the page uptodate.
304 	 */
305 	if (page_uptodate && !PageError(page))
306 		SetPageUptodate(page);
307 	unlock_page(page);
308 	return;
309 
310 still_busy:
311 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
312 	local_irq_restore(flags);
313 	return;
314 }
315 
316 /*
317  * Completion handler for block_write_full_page() - pages which are unlocked
318  * during I/O, and which have PageWriteback cleared upon I/O completion.
319  */
320 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
321 {
322 	char b[BDEVNAME_SIZE];
323 	unsigned long flags;
324 	struct buffer_head *first;
325 	struct buffer_head *tmp;
326 	struct page *page;
327 
328 	BUG_ON(!buffer_async_write(bh));
329 
330 	page = bh->b_page;
331 	if (uptodate) {
332 		set_buffer_uptodate(bh);
333 	} else {
334 		if (!quiet_error(bh)) {
335 			buffer_io_error(bh);
336 			printk(KERN_WARNING "lost page write due to "
337 					"I/O error on %s\n",
338 			       bdevname(bh->b_bdev, b));
339 		}
340 		set_bit(AS_EIO, &page->mapping->flags);
341 		set_buffer_write_io_error(bh);
342 		clear_buffer_uptodate(bh);
343 		SetPageError(page);
344 	}
345 
346 	first = page_buffers(page);
347 	local_irq_save(flags);
348 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
349 
350 	clear_buffer_async_write(bh);
351 	unlock_buffer(bh);
352 	tmp = bh->b_this_page;
353 	while (tmp != bh) {
354 		if (buffer_async_write(tmp)) {
355 			BUG_ON(!buffer_locked(tmp));
356 			goto still_busy;
357 		}
358 		tmp = tmp->b_this_page;
359 	}
360 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
361 	local_irq_restore(flags);
362 	end_page_writeback(page);
363 	return;
364 
365 still_busy:
366 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
367 	local_irq_restore(flags);
368 	return;
369 }
370 EXPORT_SYMBOL(end_buffer_async_write);
371 
372 /*
373  * If a page's buffers are under async readin (end_buffer_async_read
374  * completion) then there is a possibility that another thread of
375  * control could lock one of the buffers after it has completed
376  * but while some of the other buffers have not completed.  This
377  * locked buffer would confuse end_buffer_async_read() into not unlocking
378  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
379  * that this buffer is not under async I/O.
380  *
381  * The page comes unlocked when it has no locked buffer_async buffers
382  * left.
383  *
384  * PageLocked prevents anyone from starting new async I/O reads against any of
385  * the buffers.
386  *
387  * PageWriteback is used to prevent simultaneous writeout of the same
388  * page.
389  *
390  * PageLocked prevents anyone from starting writeback of a page which is
391  * under read I/O (PageWriteback is only ever set against a locked page).
392  */
393 static void mark_buffer_async_read(struct buffer_head *bh)
394 {
395 	bh->b_end_io = end_buffer_async_read;
396 	set_buffer_async_read(bh);
397 }
398 
399 static void mark_buffer_async_write_endio(struct buffer_head *bh,
400 					  bh_end_io_t *handler)
401 {
402 	bh->b_end_io = handler;
403 	set_buffer_async_write(bh);
404 }
405 
406 void mark_buffer_async_write(struct buffer_head *bh)
407 {
408 	mark_buffer_async_write_endio(bh, end_buffer_async_write);
409 }
410 EXPORT_SYMBOL(mark_buffer_async_write);
411 
412 
413 /*
414  * fs/buffer.c contains helper functions for buffer-backed address space's
415  * fsync functions.  A common requirement for buffer-based filesystems is
416  * that certain data from the backing blockdev needs to be written out for
417  * a successful fsync().  For example, ext2 indirect blocks need to be
418  * written back and waited upon before fsync() returns.
419  *
420  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
421  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
422  * management of a list of dependent buffers at ->i_mapping->private_list.
423  *
424  * Locking is a little subtle: try_to_free_buffers() will remove buffers
425  * from their controlling inode's queue when they are being freed.  But
426  * try_to_free_buffers() will be operating against the *blockdev* mapping
427  * at the time, not against the S_ISREG file which depends on those buffers.
428  * So the locking for private_list is via the private_lock in the address_space
429  * which backs the buffers.  Which is different from the address_space
430  * against which the buffers are listed.  So for a particular address_space,
431  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
432  * mapping->private_list will always be protected by the backing blockdev's
433  * ->private_lock.
434  *
435  * Which introduces a requirement: all buffers on an address_space's
436  * ->private_list must be from the same address_space: the blockdev's.
437  *
438  * address_spaces which do not place buffers at ->private_list via these
439  * utility functions are free to use private_lock and private_list for
440  * whatever they want.  The only requirement is that list_empty(private_list)
441  * be true at clear_inode() time.
442  *
443  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
444  * filesystems should do that.  invalidate_inode_buffers() should just go
445  * BUG_ON(!list_empty).
446  *
447  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
448  * take an address_space, not an inode.  And it should be called
449  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
450  * queued up.
451  *
452  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
453  * list if it is already on a list.  Because if the buffer is on a list,
454  * it *must* already be on the right one.  If not, the filesystem is being
455  * silly.  This will save a ton of locking.  But first we have to ensure
456  * that buffers are taken *off* the old inode's list when they are freed
457  * (presumably in truncate).  That requires careful auditing of all
458  * filesystems (do it inside bforget()).  It could also be done by bringing
459  * b_inode back.
460  */
461 
462 /*
463  * The buffer's backing address_space's private_lock must be held
464  */
465 static void __remove_assoc_queue(struct buffer_head *bh)
466 {
467 	list_del_init(&bh->b_assoc_buffers);
468 	WARN_ON(!bh->b_assoc_map);
469 	if (buffer_write_io_error(bh))
470 		set_bit(AS_EIO, &bh->b_assoc_map->flags);
471 	bh->b_assoc_map = NULL;
472 }
473 
474 int inode_has_buffers(struct inode *inode)
475 {
476 	return !list_empty(&inode->i_data.private_list);
477 }
478 
479 /*
480  * osync is designed to support O_SYNC io.  It waits synchronously for
481  * all already-submitted IO to complete, but does not queue any new
482  * writes to the disk.
483  *
484  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
485  * you dirty the buffers, and then use osync_inode_buffers to wait for
486  * completion.  Any other dirty buffers which are not yet queued for
487  * write will not be flushed to disk by the osync.
488  */
489 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
490 {
491 	struct buffer_head *bh;
492 	struct list_head *p;
493 	int err = 0;
494 
495 	spin_lock(lock);
496 repeat:
497 	list_for_each_prev(p, list) {
498 		bh = BH_ENTRY(p);
499 		if (buffer_locked(bh)) {
500 			get_bh(bh);
501 			spin_unlock(lock);
502 			wait_on_buffer(bh);
503 			if (!buffer_uptodate(bh))
504 				err = -EIO;
505 			brelse(bh);
506 			spin_lock(lock);
507 			goto repeat;
508 		}
509 	}
510 	spin_unlock(lock);
511 	return err;
512 }
513 
514 static void do_thaw_one(struct super_block *sb, void *unused)
515 {
516 	char b[BDEVNAME_SIZE];
517 	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
518 		printk(KERN_WARNING "Emergency Thaw on %s\n",
519 		       bdevname(sb->s_bdev, b));
520 }
521 
522 static void do_thaw_all(struct work_struct *work)
523 {
524 	iterate_supers(do_thaw_one, NULL);
525 	kfree(work);
526 	printk(KERN_WARNING "Emergency Thaw complete\n");
527 }
528 
529 /**
530  * emergency_thaw_all -- forcibly thaw every frozen filesystem
531  *
532  * Used for emergency unfreeze of all filesystems via SysRq
533  */
534 void emergency_thaw_all(void)
535 {
536 	struct work_struct *work;
537 
538 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
539 	if (work) {
540 		INIT_WORK(work, do_thaw_all);
541 		schedule_work(work);
542 	}
543 }
544 
545 /**
546  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
547  * @mapping: the mapping which wants those buffers written
548  *
549  * Starts I/O against the buffers at mapping->private_list, and waits upon
550  * that I/O.
551  *
552  * Basically, this is a convenience function for fsync().
553  * @mapping is a file or directory which needs those buffers to be written for
554  * a successful fsync().
555  */
556 int sync_mapping_buffers(struct address_space *mapping)
557 {
558 	struct address_space *buffer_mapping = mapping->assoc_mapping;
559 
560 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
561 		return 0;
562 
563 	return fsync_buffers_list(&buffer_mapping->private_lock,
564 					&mapping->private_list);
565 }
566 EXPORT_SYMBOL(sync_mapping_buffers);
567 
568 /*
569  * Called when we've recently written block `bblock', and it is known that
570  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
571  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
572  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
573  */
574 void write_boundary_block(struct block_device *bdev,
575 			sector_t bblock, unsigned blocksize)
576 {
577 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
578 	if (bh) {
579 		if (buffer_dirty(bh))
580 			ll_rw_block(WRITE, 1, &bh);
581 		put_bh(bh);
582 	}
583 }
584 
585 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
586 {
587 	struct address_space *mapping = inode->i_mapping;
588 	struct address_space *buffer_mapping = bh->b_page->mapping;
589 
590 	mark_buffer_dirty(bh);
591 	if (!mapping->assoc_mapping) {
592 		mapping->assoc_mapping = buffer_mapping;
593 	} else {
594 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
595 	}
596 	if (!bh->b_assoc_map) {
597 		spin_lock(&buffer_mapping->private_lock);
598 		list_move_tail(&bh->b_assoc_buffers,
599 				&mapping->private_list);
600 		bh->b_assoc_map = mapping;
601 		spin_unlock(&buffer_mapping->private_lock);
602 	}
603 }
604 EXPORT_SYMBOL(mark_buffer_dirty_inode);
605 
606 /*
607  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
608  * dirty.
609  *
610  * If warn is true, then emit a warning if the page is not uptodate and has
611  * not been truncated.
612  */
613 static void __set_page_dirty(struct page *page,
614 		struct address_space *mapping, int warn)
615 {
616 	spin_lock_irq(&mapping->tree_lock);
617 	if (page->mapping) {	/* Race with truncate? */
618 		WARN_ON_ONCE(warn && !PageUptodate(page));
619 		account_page_dirtied(page, mapping);
620 		radix_tree_tag_set(&mapping->page_tree,
621 				page_index(page), PAGECACHE_TAG_DIRTY);
622 	}
623 	spin_unlock_irq(&mapping->tree_lock);
624 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
625 }
626 
627 /*
628  * Add a page to the dirty page list.
629  *
630  * It is a sad fact of life that this function is called from several places
631  * deeply under spinlocking.  It may not sleep.
632  *
633  * If the page has buffers, the uptodate buffers are set dirty, to preserve
634  * dirty-state coherency between the page and the buffers.  If the page does
635  * not have buffers then when they are later attached they will all be set
636  * dirty.
637  *
638  * The buffers are dirtied before the page is dirtied.  There's a small race
639  * window in which a writepage caller may see the page cleanness but not the
640  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
641  * before the buffers, a concurrent writepage caller could clear the page dirty
642  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
643  * page on the dirty page list.
644  *
645  * We use private_lock to lock against try_to_free_buffers while using the
646  * page's buffer list.  Also use this to protect against clean buffers being
647  * added to the page after it was set dirty.
648  *
649  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
650  * address_space though.
651  */
652 int __set_page_dirty_buffers(struct page *page)
653 {
654 	int newly_dirty;
655 	struct address_space *mapping = page_mapping(page);
656 
657 	if (unlikely(!mapping))
658 		return !TestSetPageDirty(page);
659 
660 	spin_lock(&mapping->private_lock);
661 	if (page_has_buffers(page)) {
662 		struct buffer_head *head = page_buffers(page);
663 		struct buffer_head *bh = head;
664 
665 		do {
666 			set_buffer_dirty(bh);
667 			bh = bh->b_this_page;
668 		} while (bh != head);
669 	}
670 	newly_dirty = !TestSetPageDirty(page);
671 	spin_unlock(&mapping->private_lock);
672 
673 	if (newly_dirty)
674 		__set_page_dirty(page, mapping, 1);
675 	return newly_dirty;
676 }
677 EXPORT_SYMBOL(__set_page_dirty_buffers);
678 
679 /*
680  * Write out and wait upon a list of buffers.
681  *
682  * We have conflicting pressures: we want to make sure that all
683  * initially dirty buffers get waited on, but that any subsequently
684  * dirtied buffers don't.  After all, we don't want fsync to last
685  * forever if somebody is actively writing to the file.
686  *
687  * Do this in two main stages: first we copy dirty buffers to a
688  * temporary inode list, queueing the writes as we go.  Then we clean
689  * up, waiting for those writes to complete.
690  *
691  * During this second stage, any subsequent updates to the file may end
692  * up refiling the buffer on the original inode's dirty list again, so
693  * there is a chance we will end up with a buffer queued for write but
694  * not yet completed on that list.  So, as a final cleanup we go through
695  * the osync code to catch these locked, dirty buffers without requeuing
696  * any newly dirty buffers for write.
697  */
698 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
699 {
700 	struct buffer_head *bh;
701 	struct list_head tmp;
702 	struct address_space *mapping;
703 	int err = 0, err2;
704 	struct blk_plug plug;
705 
706 	INIT_LIST_HEAD(&tmp);
707 	blk_start_plug(&plug);
708 
709 	spin_lock(lock);
710 	while (!list_empty(list)) {
711 		bh = BH_ENTRY(list->next);
712 		mapping = bh->b_assoc_map;
713 		__remove_assoc_queue(bh);
714 		/* Avoid race with mark_buffer_dirty_inode() which does
715 		 * a lockless check and we rely on seeing the dirty bit */
716 		smp_mb();
717 		if (buffer_dirty(bh) || buffer_locked(bh)) {
718 			list_add(&bh->b_assoc_buffers, &tmp);
719 			bh->b_assoc_map = mapping;
720 			if (buffer_dirty(bh)) {
721 				get_bh(bh);
722 				spin_unlock(lock);
723 				/*
724 				 * Ensure any pending I/O completes so that
725 				 * write_dirty_buffer() actually writes the
726 				 * current contents - it is a noop if I/O is
727 				 * still in flight on potentially older
728 				 * contents.
729 				 */
730 				write_dirty_buffer(bh, WRITE_SYNC);
731 
732 				/*
733 				 * Kick off IO for the previous mapping. Note
734 				 * that we will not run the very last mapping,
735 				 * wait_on_buffer() will do that for us
736 				 * through sync_buffer().
737 				 */
738 				brelse(bh);
739 				spin_lock(lock);
740 			}
741 		}
742 	}
743 
744 	spin_unlock(lock);
745 	blk_finish_plug(&plug);
746 	spin_lock(lock);
747 
748 	while (!list_empty(&tmp)) {
749 		bh = BH_ENTRY(tmp.prev);
750 		get_bh(bh);
751 		mapping = bh->b_assoc_map;
752 		__remove_assoc_queue(bh);
753 		/* Avoid race with mark_buffer_dirty_inode() which does
754 		 * a lockless check and we rely on seeing the dirty bit */
755 		smp_mb();
756 		if (buffer_dirty(bh)) {
757 			list_add(&bh->b_assoc_buffers,
758 				 &mapping->private_list);
759 			bh->b_assoc_map = mapping;
760 		}
761 		spin_unlock(lock);
762 		wait_on_buffer(bh);
763 		if (!buffer_uptodate(bh))
764 			err = -EIO;
765 		brelse(bh);
766 		spin_lock(lock);
767 	}
768 
769 	spin_unlock(lock);
770 	err2 = osync_buffers_list(lock, list);
771 	if (err)
772 		return err;
773 	else
774 		return err2;
775 }
776 
777 /*
778  * Invalidate any and all dirty buffers on a given inode.  We are
779  * probably unmounting the fs, but that doesn't mean we have already
780  * done a sync().  Just drop the buffers from the inode list.
781  *
782  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
783  * assumes that all the buffers are against the blockdev.  Not true
784  * for reiserfs.
785  */
786 void invalidate_inode_buffers(struct inode *inode)
787 {
788 	if (inode_has_buffers(inode)) {
789 		struct address_space *mapping = &inode->i_data;
790 		struct list_head *list = &mapping->private_list;
791 		struct address_space *buffer_mapping = mapping->assoc_mapping;
792 
793 		spin_lock(&buffer_mapping->private_lock);
794 		while (!list_empty(list))
795 			__remove_assoc_queue(BH_ENTRY(list->next));
796 		spin_unlock(&buffer_mapping->private_lock);
797 	}
798 }
799 EXPORT_SYMBOL(invalidate_inode_buffers);
800 
801 /*
802  * Remove any clean buffers from the inode's buffer list.  This is called
803  * when we're trying to free the inode itself.  Those buffers can pin it.
804  *
805  * Returns true if all buffers were removed.
806  */
807 int remove_inode_buffers(struct inode *inode)
808 {
809 	int ret = 1;
810 
811 	if (inode_has_buffers(inode)) {
812 		struct address_space *mapping = &inode->i_data;
813 		struct list_head *list = &mapping->private_list;
814 		struct address_space *buffer_mapping = mapping->assoc_mapping;
815 
816 		spin_lock(&buffer_mapping->private_lock);
817 		while (!list_empty(list)) {
818 			struct buffer_head *bh = BH_ENTRY(list->next);
819 			if (buffer_dirty(bh)) {
820 				ret = 0;
821 				break;
822 			}
823 			__remove_assoc_queue(bh);
824 		}
825 		spin_unlock(&buffer_mapping->private_lock);
826 	}
827 	return ret;
828 }
829 
830 /*
831  * Create the appropriate buffers when given a page for data area and
832  * the size of each buffer.. Use the bh->b_this_page linked list to
833  * follow the buffers created.  Return NULL if unable to create more
834  * buffers.
835  *
836  * The retry flag is used to differentiate async IO (paging, swapping),
837  * which may not fail, from ordinary buffer allocations.
838  */
839 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
840 		int retry)
841 {
842 	struct buffer_head *bh, *head;
843 	long offset;
844 
845 try_again:
846 	head = NULL;
847 	offset = PAGE_SIZE;
848 	while ((offset -= size) >= 0) {
849 		bh = alloc_buffer_head(GFP_NOFS);
850 		if (!bh)
851 			goto no_grow;
852 
853 		bh->b_bdev = NULL;
854 		bh->b_this_page = head;
855 		bh->b_blocknr = -1;
856 		head = bh;
857 
858 		bh->b_state = 0;
859 		atomic_set(&bh->b_count, 0);
860 		bh->b_size = size;
861 
862 		/* Link the buffer to its page */
863 		set_bh_page(bh, page, offset);
864 
865 		init_buffer(bh, NULL, NULL);
866 	}
867 	return head;
868 /*
869  * In case anything failed, we just free everything we got.
870  */
871 no_grow:
872 	if (head) {
873 		do {
874 			bh = head;
875 			head = head->b_this_page;
876 			free_buffer_head(bh);
877 		} while (head);
878 	}
879 
880 	/*
881 	 * Return failure for non-async IO requests.  Async IO requests
882 	 * are not allowed to fail, so we have to wait until buffer heads
883 	 * become available.  But we don't want tasks sleeping with
884 	 * partially complete buffers, so all were released above.
885 	 */
886 	if (!retry)
887 		return NULL;
888 
889 	/* We're _really_ low on memory. Now we just
890 	 * wait for old buffer heads to become free due to
891 	 * finishing IO.  Since this is an async request and
892 	 * the reserve list is empty, we're sure there are
893 	 * async buffer heads in use.
894 	 */
895 	free_more_memory();
896 	goto try_again;
897 }
898 EXPORT_SYMBOL_GPL(alloc_page_buffers);
899 
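/*
 * Close the b_this_page chain built by alloc_page_buffers() into a ring
 * and attach it to the page.
 */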
900 static inline void
901 link_dev_buffers(struct page *page, struct buffer_head *head)
902 {
903 	struct buffer_head *bh, *tail;
904 
905 	bh = head;
906 	do {
907 		tail = bh;
908 		bh = bh->b_this_page;
909 	} while (bh);
910 	tail->b_this_page = head;
911 	attach_page_buffers(page, head);
912 }
913 
914 /*
915  * Initialise the state of a blockdev page's buffers.
916  */
917 static void
918 init_page_buffers(struct page *page, struct block_device *bdev,
919 			sector_t block, int size)
920 {
921 	struct buffer_head *head = page_buffers(page);
922 	struct buffer_head *bh = head;
923 	int uptodate = PageUptodate(page);
924 
925 	do {
926 		if (!buffer_mapped(bh)) {
927 			init_buffer(bh, NULL, NULL);
928 			bh->b_bdev = bdev;
929 			bh->b_blocknr = block;
930 			if (uptodate)
931 				set_buffer_uptodate(bh);
932 			set_buffer_mapped(bh);
933 		}
934 		block++;
935 		bh = bh->b_this_page;
936 	} while (bh != head);
937 }
938 
939 /*
940  * Create the page-cache page that contains the requested block.
941  *
942  * This is used purely for blockdev mappings.
943  */
944 static struct page *
945 grow_dev_page(struct block_device *bdev, sector_t block,
946 		pgoff_t index, int size)
947 {
948 	struct inode *inode = bdev->bd_inode;
949 	struct page *page;
950 	struct buffer_head *bh;
951 
952 	page = find_or_create_page(inode->i_mapping, index,
953 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
954 	if (!page)
955 		return NULL;
956 
957 	BUG_ON(!PageLocked(page));
958 
959 	if (page_has_buffers(page)) {
960 		bh = page_buffers(page);
961 		if (bh->b_size == size) {
962 			init_page_buffers(page, bdev, block, size);
963 			return page;
964 		}
965 		if (!try_to_free_buffers(page))
966 			goto failed;
967 	}
968 
969 	/*
970 	 * Allocate some buffers for this page
971 	 */
972 	bh = alloc_page_buffers(page, size, 0);
973 	if (!bh)
974 		goto failed;
975 
976 	/*
977 	 * Link the page to the buffers and initialise them.  Take the
978 	 * lock to be atomic wrt __find_get_block(), which does not
979 	 * run under the page lock.
980 	 */
981 	spin_lock(&inode->i_mapping->private_lock);
982 	link_dev_buffers(page, bh);
983 	init_page_buffers(page, bdev, block, size);
984 	spin_unlock(&inode->i_mapping->private_lock);
985 	return page;
986 
987 failed:
988 	BUG();
989 	unlock_page(page);
990 	page_cache_release(page);
991 	return NULL;
992 }
993 
994 /*
995  * Create buffers for the specified block device block's page.  If
996  * that page was dirty, the buffers are set dirty also.
997  */
998 static int
999 grow_buffers(struct block_device *bdev, sector_t block, int size)
1000 {
1001 	struct page *page;
1002 	pgoff_t index;
1003 	int sizebits;
1004 
1005 	sizebits = -1;
1006 	do {
1007 		sizebits++;
1008 	} while ((size << sizebits) < PAGE_SIZE);
1009 
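	/*
	 * sizebits is now log2(blocks per page); for example, with 1k blocks
	 * and 4k pages, sizebits == 2, so four blocks share each page and the
	 * pagecache index is the block number shifted right by two.
	 */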
1010 	index = block >> sizebits;
1011 
1012 	/*
1013 	 * Check for a block which wants to lie outside our maximum possible
1014 	 * pagecache index.  (this comparison is done using sector_t types).
1015 	 */
1016 	if (unlikely(index != block >> sizebits)) {
1017 		char b[BDEVNAME_SIZE];
1018 
1019 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1020 			"device %s\n",
1021 			__func__, (unsigned long long)block,
1022 			bdevname(bdev, b));
1023 		return -EIO;
1024 	}
1025 	block = index << sizebits;
1026 	/* Create a page with the proper size buffers.. */
1027 	page = grow_dev_page(bdev, block, index, size);
1028 	if (!page)
1029 		return 0;
1030 	unlock_page(page);
1031 	page_cache_release(page);
1032 	return 1;
1033 }
1034 
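/*
 * Slow path for __getblk(): validate the requested size against the
 * device's logical block size, then loop between __find_get_block() and
 * grow_buffers() until the buffer exists, freeing memory if allocation
 * keeps failing.
 */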
1035 static struct buffer_head *
1036 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1037 {
1038 	/* Size must be multiple of hard sectorsize */
1039 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1040 			(size < 512 || size > PAGE_SIZE))) {
1041 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1042 					size);
1043 		printk(KERN_ERR "logical block size: %d\n",
1044 					bdev_logical_block_size(bdev));
1045 
1046 		dump_stack();
1047 		return NULL;
1048 	}
1049 
1050 	for (;;) {
1051 		struct buffer_head * bh;
1052 		int ret;
1053 
1054 		bh = __find_get_block(bdev, block, size);
1055 		if (bh)
1056 			return bh;
1057 
1058 		ret = grow_buffers(bdev, block, size);
1059 		if (ret < 0)
1060 			return NULL;
1061 		if (ret == 0)
1062 			free_more_memory();
1063 	}
1064 }
1065 
1066 /*
1067  * The relationship between dirty buffers and dirty pages:
1068  *
1069  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1070  * the page is tagged dirty in its radix tree.
1071  *
1072  * At all times, the dirtiness of the buffers represents the dirtiness of
1073  * subsections of the page.  If the page has buffers, the page dirty bit is
1074  * merely a hint about the true dirty state.
1075  *
1076  * When a page is set dirty in its entirety, all its buffers are marked dirty
1077  * (if the page has buffers).
1078  *
1079  * When a buffer is marked dirty, its page is dirtied, but the page's other
1080  * buffers are not.
1081  *
1082  * Also.  When blockdev buffers are explicitly read with bread(), they
1083  * individually become uptodate.  But their backing page remains not
1084  * uptodate - even if all of its buffers are uptodate.  A subsequent
1085  * block_read_full_page() against that page will discover all the uptodate
1086  * buffers, will set the page uptodate and will perform no I/O.
1087  */
1088 
1089 /**
1090  * mark_buffer_dirty - mark a buffer_head as needing writeout
1091  * @bh: the buffer_head to mark dirty
1092  *
1093  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1094  * backing page dirty, then tag the page as dirty in its address_space's radix
1095  * tree and then attach the address_space's inode to its superblock's dirty
1096  * inode list.
1097  *
1098  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1099  * mapping->tree_lock and mapping->host->i_lock.
1100  */
1101 void mark_buffer_dirty(struct buffer_head *bh)
1102 {
1103 	WARN_ON_ONCE(!buffer_uptodate(bh));
1104 
1105 	/*
1106 	 * Very *carefully* optimize the it-is-already-dirty case.
1107 	 *
1108 	 * Don't let the final "is it dirty" escape to before we
1109 	 * perhaps modified the buffer.
1110 	 */
1111 	if (buffer_dirty(bh)) {
1112 		smp_mb();
1113 		if (buffer_dirty(bh))
1114 			return;
1115 	}
1116 
1117 	if (!test_set_buffer_dirty(bh)) {
1118 		struct page *page = bh->b_page;
1119 		if (!TestSetPageDirty(page)) {
1120 			struct address_space *mapping = page_mapping(page);
1121 			if (mapping)
1122 				__set_page_dirty(page, mapping, 0);
1123 		}
1124 	}
1125 }
1126 EXPORT_SYMBOL(mark_buffer_dirty);
1127 
1128 /*
1129  * Decrement a buffer_head's reference count.  If all buffers against a page
1130  * have zero reference count, are clean and unlocked, and if the page is clean
1131  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1132  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1133  * a page but it ends up not being freed, and buffers may later be reattached).
1134  */
1135 void __brelse(struct buffer_head * buf)
1136 {
1137 	if (atomic_read(&buf->b_count)) {
1138 		put_bh(buf);
1139 		return;
1140 	}
1141 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1142 }
1143 EXPORT_SYMBOL(__brelse);
1144 
1145 /*
1146  * bforget() is like brelse(), except it discards any
1147  * potentially dirty data.
1148  */
1149 void __bforget(struct buffer_head *bh)
1150 {
1151 	clear_buffer_dirty(bh);
1152 	if (bh->b_assoc_map) {
1153 		struct address_space *buffer_mapping = bh->b_page->mapping;
1154 
1155 		spin_lock(&buffer_mapping->private_lock);
1156 		list_del_init(&bh->b_assoc_buffers);
1157 		bh->b_assoc_map = NULL;
1158 		spin_unlock(&buffer_mapping->private_lock);
1159 	}
1160 	__brelse(bh);
1161 }
1162 EXPORT_SYMBOL(__bforget);
1163 
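/*
 * Slow path for __bread(): lock the buffer, re-check the uptodate flag and,
 * if it is still not uptodate, submit a synchronous read and wait for it.
 * Returns NULL (dropping the reference) if the read fails.
 */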
1164 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1165 {
1166 	lock_buffer(bh);
1167 	if (buffer_uptodate(bh)) {
1168 		unlock_buffer(bh);
1169 		return bh;
1170 	} else {
1171 		get_bh(bh);
1172 		bh->b_end_io = end_buffer_read_sync;
1173 		submit_bh(READ, bh);
1174 		wait_on_buffer(bh);
1175 		if (buffer_uptodate(bh))
1176 			return bh;
1177 	}
1178 	brelse(bh);
1179 	return NULL;
1180 }
1181 
1182 /*
1183  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1184  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1185  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1186  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1187  * CPU's LRUs at the same time.
1188  *
1189  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1190  * sb_find_get_block().
1191  *
1192  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1193  * a local interrupt disable for that.
1194  */
1195 
1196 #define BH_LRU_SIZE	8
1197 
1198 struct bh_lru {
1199 	struct buffer_head *bhs[BH_LRU_SIZE];
1200 };
1201 
1202 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1203 
1204 #ifdef CONFIG_SMP
1205 #define bh_lru_lock()	local_irq_disable()
1206 #define bh_lru_unlock()	local_irq_enable()
1207 #else
1208 #define bh_lru_lock()	preempt_disable()
1209 #define bh_lru_unlock()	preempt_enable()
1210 #endif
1211 
1212 static inline void check_irqs_on(void)
1213 {
1214 #ifdef irqs_disabled
1215 	BUG_ON(irqs_disabled());
1216 #endif
1217 }
1218 
1219 /*
1220  * The LRU management algorithm is dopey-but-simple.  Sorry.
1221  */
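/*
 * Install @bh at the front of this CPU's LRU by rebuilding the array in a
 * local copy: @bh goes first, a pre-existing entry for the same buffer has
 * its duplicate reference dropped, and whichever buffer falls off the end
 * is released after the lock is dropped.
 */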
1222 static void bh_lru_install(struct buffer_head *bh)
1223 {
1224 	struct buffer_head *evictee = NULL;
1225 
1226 	check_irqs_on();
1227 	bh_lru_lock();
1228 	if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
1229 		struct buffer_head *bhs[BH_LRU_SIZE];
1230 		int in;
1231 		int out = 0;
1232 
1233 		get_bh(bh);
1234 		bhs[out++] = bh;
1235 		for (in = 0; in < BH_LRU_SIZE; in++) {
1236 			struct buffer_head *bh2 =
1237 				__this_cpu_read(bh_lrus.bhs[in]);
1238 
1239 			if (bh2 == bh) {
1240 				__brelse(bh2);
1241 			} else {
1242 				if (out >= BH_LRU_SIZE) {
1243 					BUG_ON(evictee != NULL);
1244 					evictee = bh2;
1245 				} else {
1246 					bhs[out++] = bh2;
1247 				}
1248 			}
1249 		}
1250 		while (out < BH_LRU_SIZE)
1251 			bhs[out++] = NULL;
1252 		memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
1253 	}
1254 	bh_lru_unlock();
1255 
1256 	if (evictee)
1257 		__brelse(evictee);
1258 }
1259 
1260 /*
1261  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1262  */
1263 static struct buffer_head *
1264 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1265 {
1266 	struct buffer_head *ret = NULL;
1267 	unsigned int i;
1268 
1269 	check_irqs_on();
1270 	bh_lru_lock();
1271 	for (i = 0; i < BH_LRU_SIZE; i++) {
1272 		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1273 
1274 		if (bh && bh->b_bdev == bdev &&
1275 				bh->b_blocknr == block && bh->b_size == size) {
1276 			if (i) {
1277 				while (i) {
1278 					__this_cpu_write(bh_lrus.bhs[i],
1279 						__this_cpu_read(bh_lrus.bhs[i - 1]));
1280 					i--;
1281 				}
1282 				__this_cpu_write(bh_lrus.bhs[0], bh);
1283 			}
1284 			get_bh(bh);
1285 			ret = bh;
1286 			break;
1287 		}
1288 	}
1289 	bh_lru_unlock();
1290 	return ret;
1291 }
1292 
1293 /*
1294  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1295  * it in the LRU and mark it as accessed.  If it is not present then return
1296  * NULL
1297  */
1298 struct buffer_head *
1299 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1300 {
1301 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1302 
1303 	if (bh == NULL) {
1304 		bh = __find_get_block_slow(bdev, block);
1305 		if (bh)
1306 			bh_lru_install(bh);
1307 	}
1308 	if (bh)
1309 		touch_buffer(bh);
1310 	return bh;
1311 }
1312 EXPORT_SYMBOL(__find_get_block);
1313 
1314 /*
1315  * __getblk will locate (and, if necessary, create) the buffer_head
1316  * which corresponds to the passed block_device, block and size. The
1317  * returned buffer has its reference count incremented.
1318  *
1319  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1320  * illegal block number, __getblk() will happily return a buffer_head
1321  * which represents the non-existent block.  Very weird.
1322  *
1323  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1324  * attempt is failing.  FIXME, perhaps?
1325  */
1326 struct buffer_head *
1327 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1328 {
1329 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1330 
1331 	might_sleep();
1332 	if (bh == NULL)
1333 		bh = __getblk_slow(bdev, block, size);
1334 	return bh;
1335 }
1336 EXPORT_SYMBOL(__getblk);
1337 
1338 /*
1339  * Do async read-ahead on a buffer..
1340  */
1341 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1342 {
1343 	struct buffer_head *bh = __getblk(bdev, block, size);
1344 	if (likely(bh)) {
1345 		ll_rw_block(READA, 1, &bh);
1346 		brelse(bh);
1347 	}
1348 }
1349 EXPORT_SYMBOL(__breadahead);
1350 
1351 /**
1352  *  __bread() - reads a specified block and returns the bh
1353  *  @bdev: the block_device to read from
1354  *  @block: number of block
1355  *  @size: size (in bytes) to read
1356  *
1357  *  Reads a specified block, and returns buffer head that contains it.
1358  *  It returns NULL if the block was unreadable.
1359  */
1360 struct buffer_head *
1361 __bread(struct block_device *bdev, sector_t block, unsigned size)
1362 {
1363 	struct buffer_head *bh = __getblk(bdev, block, size);
1364 
1365 	if (likely(bh) && !buffer_uptodate(bh))
1366 		bh = __bread_slow(bh);
1367 	return bh;
1368 }
1369 EXPORT_SYMBOL(__bread);
1370 
1371 /*
1372  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1373  * This doesn't race because it runs in each cpu either in irq
1374  * or with preempt disabled.
1375  */
1376 static void invalidate_bh_lru(void *arg)
1377 {
1378 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1379 	int i;
1380 
1381 	for (i = 0; i < BH_LRU_SIZE; i++) {
1382 		brelse(b->bhs[i]);
1383 		b->bhs[i] = NULL;
1384 	}
1385 	put_cpu_var(bh_lrus);
1386 }
1387 
1388 void invalidate_bh_lrus(void)
1389 {
1390 	on_each_cpu(invalidate_bh_lru, NULL, 1);
1391 }
1392 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1393 
1394 void set_bh_page(struct buffer_head *bh,
1395 		struct page *page, unsigned long offset)
1396 {
1397 	bh->b_page = page;
1398 	BUG_ON(offset >= PAGE_SIZE);
1399 	if (PageHighMem(page))
1400 		/*
1401 		 * This catches illegal uses and preserves the offset:
1402 		 */
1403 		bh->b_data = (char *)(0 + offset);
1404 	else
1405 		bh->b_data = page_address(page) + offset;
1406 }
1407 EXPORT_SYMBOL(set_bh_page);
1408 
1409 /*
1410  * Called when truncating a buffer on a page completely.
1411  */
1412 static void discard_buffer(struct buffer_head * bh)
1413 {
1414 	lock_buffer(bh);
1415 	clear_buffer_dirty(bh);
1416 	bh->b_bdev = NULL;
1417 	clear_buffer_mapped(bh);
1418 	clear_buffer_req(bh);
1419 	clear_buffer_new(bh);
1420 	clear_buffer_delay(bh);
1421 	clear_buffer_unwritten(bh);
1422 	unlock_buffer(bh);
1423 }
1424 
1425 /**
1426  * block_invalidatepage - invalidate part or all of a buffer-backed page
1427  *
1428  * @page: the page which is affected
1429  * @offset: the index of the truncation point
1430  *
1431  * block_invalidatepage() is called when all or part of the page has become
1432  * invalidated by a truncate operation.
1433  *
1434  * block_invalidatepage() does not have to release all buffers, but it must
1435  * ensure that no dirty buffer is left outside @offset and that no I/O
1436  * is underway against any of the blocks which are outside the truncation
1437  * point.  Because the caller is about to free (and possibly reuse) those
1438  * blocks on-disk.
1439  */
1440 void block_invalidatepage(struct page *page, unsigned long offset)
1441 {
1442 	struct buffer_head *head, *bh, *next;
1443 	unsigned int curr_off = 0;
1444 
1445 	BUG_ON(!PageLocked(page));
1446 	if (!page_has_buffers(page))
1447 		goto out;
1448 
1449 	head = page_buffers(page);
1450 	bh = head;
1451 	do {
1452 		unsigned int next_off = curr_off + bh->b_size;
1453 		next = bh->b_this_page;
1454 
1455 		/*
1456 		 * is this block fully invalidated?
1457 		 */
1458 		if (offset <= curr_off)
1459 			discard_buffer(bh);
1460 		curr_off = next_off;
1461 		bh = next;
1462 	} while (bh != head);
1463 
1464 	/*
1465 	 * We release buffers only if the entire page is being invalidated.
1466 	 * The get_block cached value has been unconditionally invalidated,
1467 	 * so real IO is not possible anymore.
1468 	 */
1469 	if (offset == 0)
1470 		try_to_release_page(page, 0);
1471 out:
1472 	return;
1473 }
1474 EXPORT_SYMBOL(block_invalidatepage);
1475 
1476 /*
1477  * We attach and possibly dirty the buffers atomically wrt
1478  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1479  * is already excluded via the page lock.
1480  */
1481 void create_empty_buffers(struct page *page,
1482 			unsigned long blocksize, unsigned long b_state)
1483 {
1484 	struct buffer_head *bh, *head, *tail;
1485 
1486 	head = alloc_page_buffers(page, blocksize, 1);
1487 	bh = head;
1488 	do {
1489 		bh->b_state |= b_state;
1490 		tail = bh;
1491 		bh = bh->b_this_page;
1492 	} while (bh);
1493 	tail->b_this_page = head;
1494 
1495 	spin_lock(&page->mapping->private_lock);
1496 	if (PageUptodate(page) || PageDirty(page)) {
1497 		bh = head;
1498 		do {
1499 			if (PageDirty(page))
1500 				set_buffer_dirty(bh);
1501 			if (PageUptodate(page))
1502 				set_buffer_uptodate(bh);
1503 			bh = bh->b_this_page;
1504 		} while (bh != head);
1505 	}
1506 	attach_page_buffers(page, head);
1507 	spin_unlock(&page->mapping->private_lock);
1508 }
1509 EXPORT_SYMBOL(create_empty_buffers);
1510 
1511 /*
1512  * We are taking a block for data and we don't want any output from any
1513  * buffer-cache aliases starting from return from that function and
1514  * until the moment when something will explicitly mark the buffer
1515  * dirty (hopefully that will not happen until we will free that block ;-)
1516  * We don't even need to mark it not-uptodate - nobody can expect
1517  * anything from a newly allocated buffer anyway. We used to use
1518  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1519  * don't want to mark the alias unmapped, for example - it would confuse
1520  * anyone who might pick it with bread() afterwards...
1521  *
1522  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1523  * be writeout I/O going on against recently-freed buffers.  We don't
1524  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1525  * only if we really need to.  That happens here.
1526  */
1527 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1528 {
1529 	struct buffer_head *old_bh;
1530 
1531 	might_sleep();
1532 
1533 	old_bh = __find_get_block_slow(bdev, block);
1534 	if (old_bh) {
1535 		clear_buffer_dirty(old_bh);
1536 		wait_on_buffer(old_bh);
1537 		clear_buffer_req(old_bh);
1538 		__brelse(old_bh);
1539 	}
1540 }
1541 EXPORT_SYMBOL(unmap_underlying_metadata);
1542 
1543 /*
1544  * NOTE! All mapped/uptodate combinations are valid:
1545  *
1546  *	Mapped	Uptodate	Meaning
1547  *
1548  *	No	No		"unknown" - must do get_block()
1549  *	No	Yes		"hole" - zero-filled
1550  *	Yes	No		"allocated" - allocated on disk, not read in
1551  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1552  *
1553  * "Dirty" is valid only with the last case (mapped+uptodate).
1554  */
1555 
1556 /*
1557  * While block_write_full_page is writing back the dirty buffers under
1558  * the page lock, whoever dirtied the buffers may decide to clean them
1559  * again at any time.  We handle that by only looking at the buffer
1560  * state inside lock_buffer().
1561  *
1562  * If block_write_full_page() is called for regular writeback
1563  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1564  * locked buffer.   This can only happen if someone has written the buffer
1565  * directly, with submit_bh().  At the address_space level PageWriteback
1566  * prevents this contention from occurring.
1567  *
1568  * If block_write_full_page() is called with wbc->sync_mode ==
1569  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1570  * causes the writes to be flagged as synchronous writes.
1571  */
1572 static int __block_write_full_page(struct inode *inode, struct page *page,
1573 			get_block_t *get_block, struct writeback_control *wbc,
1574 			bh_end_io_t *handler)
1575 {
1576 	int err;
1577 	sector_t block;
1578 	sector_t last_block;
1579 	struct buffer_head *bh, *head;
1580 	const unsigned blocksize = 1 << inode->i_blkbits;
1581 	int nr_underway = 0;
1582 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1583 			WRITE_SYNC : WRITE);
1584 
1585 	BUG_ON(!PageLocked(page));
1586 
1587 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1588 
1589 	if (!page_has_buffers(page)) {
1590 		create_empty_buffers(page, blocksize,
1591 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1592 	}
1593 
1594 	/*
1595 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1596 	 * here, and the (potentially unmapped) buffers may become dirty at
1597 	 * any time.  If a buffer becomes dirty here after we've inspected it
1598 	 * then we just miss that fact, and the page stays dirty.
1599 	 *
1600 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1601 	 * handle that here by just cleaning them.
1602 	 */
1603 
1604 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1605 	head = page_buffers(page);
1606 	bh = head;
1607 
1608 	/*
1609 	 * Get all the dirty buffers mapped to disk addresses and
1610 	 * handle any aliases from the underlying blockdev's mapping.
1611 	 */
1612 	do {
1613 		if (block > last_block) {
1614 			/*
1615 			 * mapped buffers outside i_size will occur, because
1616 			 * this page can be outside i_size when there is a
1617 			 * truncate in progress.
1618 			 */
1619 			/*
1620 			 * The buffer was zeroed by block_write_full_page()
1621 			 */
1622 			clear_buffer_dirty(bh);
1623 			set_buffer_uptodate(bh);
1624 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1625 			   buffer_dirty(bh)) {
1626 			WARN_ON(bh->b_size != blocksize);
1627 			err = get_block(inode, block, bh, 1);
1628 			if (err)
1629 				goto recover;
1630 			clear_buffer_delay(bh);
1631 			if (buffer_new(bh)) {
1632 				/* blockdev mappings never come here */
1633 				clear_buffer_new(bh);
1634 				unmap_underlying_metadata(bh->b_bdev,
1635 							bh->b_blocknr);
1636 			}
1637 		}
1638 		bh = bh->b_this_page;
1639 		block++;
1640 	} while (bh != head);
1641 
1642 	do {
1643 		if (!buffer_mapped(bh))
1644 			continue;
1645 		/*
1646 		 * If it's a fully non-blocking write attempt and we cannot
1647 		 * lock the buffer then redirty the page.  Note that this can
1648 		 * potentially cause a busy-wait loop from writeback threads
1649 		 * and kswapd activity, but those code paths have their own
1650 		 * higher-level throttling.
1651 		 */
1652 		if (wbc->sync_mode != WB_SYNC_NONE) {
1653 			lock_buffer(bh);
1654 		} else if (!trylock_buffer(bh)) {
1655 			redirty_page_for_writepage(wbc, page);
1656 			continue;
1657 		}
1658 		if (test_clear_buffer_dirty(bh)) {
1659 			mark_buffer_async_write_endio(bh, handler);
1660 		} else {
1661 			unlock_buffer(bh);
1662 		}
1663 	} while ((bh = bh->b_this_page) != head);
1664 
1665 	/*
1666 	 * The page and its buffers are protected by PageWriteback(), so we can
1667 	 * drop the bh refcounts early.
1668 	 */
1669 	BUG_ON(PageWriteback(page));
1670 	set_page_writeback(page);
1671 
1672 	do {
1673 		struct buffer_head *next = bh->b_this_page;
1674 		if (buffer_async_write(bh)) {
1675 			submit_bh(write_op, bh);
1676 			nr_underway++;
1677 		}
1678 		bh = next;
1679 	} while (bh != head);
1680 	unlock_page(page);
1681 
1682 	err = 0;
1683 done:
1684 	if (nr_underway == 0) {
1685 		/*
1686 		 * The page was marked dirty, but the buffers were
1687 		 * clean.  Someone wrote them back by hand with
1688 		 * ll_rw_block/submit_bh.  A rare case.
1689 		 */
1690 		end_page_writeback(page);
1691 
1692 		/*
1693 		 * The page and buffer_heads can be released at any time from
1694 		 * here on.
1695 		 */
1696 	}
1697 	return err;
1698 
1699 recover:
1700 	/*
1701 	 * ENOSPC, or some other error.  We may already have added some
1702 	 * blocks to the file, so we need to write these out to avoid
1703 	 * exposing stale data.
1704 	 * The page is currently locked and not marked for writeback
1705 	 */
1706 	bh = head;
1707 	/* Recovery: lock and submit the mapped buffers */
1708 	do {
1709 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1710 		    !buffer_delay(bh)) {
1711 			lock_buffer(bh);
1712 			mark_buffer_async_write_endio(bh, handler);
1713 		} else {
1714 			/*
1715 			 * The buffer may have been set dirty during
1716 			 * attachment to a dirty page.
1717 			 */
1718 			clear_buffer_dirty(bh);
1719 		}
1720 	} while ((bh = bh->b_this_page) != head);
1721 	SetPageError(page);
1722 	BUG_ON(PageWriteback(page));
1723 	mapping_set_error(page->mapping, err);
1724 	set_page_writeback(page);
1725 	do {
1726 		struct buffer_head *next = bh->b_this_page;
1727 		if (buffer_async_write(bh)) {
1728 			clear_buffer_dirty(bh);
1729 			submit_bh(write_op, bh);
1730 			nr_underway++;
1731 		}
1732 		bh = next;
1733 	} while (bh != head);
1734 	unlock_page(page);
1735 	goto done;
1736 }
1737 
1738 /*
1739  * If a page has any new buffers, zero them out here, and mark them uptodate
1740  * and dirty so they'll be written out (in order to prevent uninitialised
1741  * block data from leaking). And clear the new bit.
1742  */
1743 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1744 {
1745 	unsigned int block_start, block_end;
1746 	struct buffer_head *head, *bh;
1747 
1748 	BUG_ON(!PageLocked(page));
1749 	if (!page_has_buffers(page))
1750 		return;
1751 
1752 	bh = head = page_buffers(page);
1753 	block_start = 0;
1754 	do {
1755 		block_end = block_start + bh->b_size;
1756 
1757 		if (buffer_new(bh)) {
1758 			if (block_end > from && block_start < to) {
1759 				if (!PageUptodate(page)) {
1760 					unsigned start, size;
1761 
1762 					start = max(from, block_start);
1763 					size = min(to, block_end) - start;
1764 
1765 					zero_user(page, start, size);
1766 					set_buffer_uptodate(bh);
1767 				}
1768 
1769 				clear_buffer_new(bh);
1770 				mark_buffer_dirty(bh);
1771 			}
1772 		}
1773 
1774 		block_start = block_end;
1775 		bh = bh->b_this_page;
1776 	} while (bh != head);
1777 }
1778 EXPORT_SYMBOL(page_zero_new_buffers);
1779 
1780 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1781 		get_block_t *get_block)
1782 {
1783 	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1784 	unsigned to = from + len;
1785 	struct inode *inode = page->mapping->host;
1786 	unsigned block_start, block_end;
1787 	sector_t block;
1788 	int err = 0;
1789 	unsigned blocksize, bbits;
1790 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1791 
1792 	BUG_ON(!PageLocked(page));
1793 	BUG_ON(from > PAGE_CACHE_SIZE);
1794 	BUG_ON(to > PAGE_CACHE_SIZE);
1795 	BUG_ON(from > to);
1796 
1797 	blocksize = 1 << inode->i_blkbits;
1798 	if (!page_has_buffers(page))
1799 		create_empty_buffers(page, blocksize, 0);
1800 	head = page_buffers(page);
1801 
1802 	bbits = inode->i_blkbits;
1803 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1804 
1805 	for(bh = head, block_start = 0; bh != head || !block_start;
1806 	    block++, block_start=block_end, bh = bh->b_this_page) {
1807 		block_end = block_start + blocksize;
1808 		if (block_end <= from || block_start >= to) {
1809 			if (PageUptodate(page)) {
1810 				if (!buffer_uptodate(bh))
1811 					set_buffer_uptodate(bh);
1812 			}
1813 			continue;
1814 		}
1815 		if (buffer_new(bh))
1816 			clear_buffer_new(bh);
1817 		if (!buffer_mapped(bh)) {
1818 			WARN_ON(bh->b_size != blocksize);
1819 			err = get_block(inode, block, bh, 1);
1820 			if (err)
1821 				break;
1822 			if (buffer_new(bh)) {
1823 				unmap_underlying_metadata(bh->b_bdev,
1824 							bh->b_blocknr);
1825 				if (PageUptodate(page)) {
1826 					clear_buffer_new(bh);
1827 					set_buffer_uptodate(bh);
1828 					mark_buffer_dirty(bh);
1829 					continue;
1830 				}
1831 				if (block_end > to || block_start < from)
1832 					zero_user_segments(page,
1833 						to, block_end,
1834 						block_start, from);
1835 				continue;
1836 			}
1837 		}
1838 		if (PageUptodate(page)) {
1839 			if (!buffer_uptodate(bh))
1840 				set_buffer_uptodate(bh);
1841 			continue;
1842 		}
1843 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1844 		    !buffer_unwritten(bh) &&
1845 		     (block_start < from || block_end > to)) {
1846 			ll_rw_block(READ, 1, &bh);
1847 			*wait_bh++=bh;
1848 		}
1849 	}
1850 	/*
1851 	 * If we issued read requests - let them complete.
1852 	 */
1853 	while(wait_bh > wait) {
1854 		wait_on_buffer(*--wait_bh);
1855 		if (!buffer_uptodate(*wait_bh))
1856 			err = -EIO;
1857 	}
1858 	if (unlikely(err))
1859 		page_zero_new_buffers(page, from, to);
1860 	return err;
1861 }
1862 EXPORT_SYMBOL(__block_write_begin);
1863 
1864 static int __block_commit_write(struct inode *inode, struct page *page,
1865 		unsigned from, unsigned to)
1866 {
1867 	unsigned block_start, block_end;
1868 	int partial = 0;
1869 	unsigned blocksize;
1870 	struct buffer_head *bh, *head;
1871 
1872 	blocksize = 1 << inode->i_blkbits;
1873 
1874 	for(bh = head = page_buffers(page), block_start = 0;
1875 	    bh != head || !block_start;
1876 	    block_start=block_end, bh = bh->b_this_page) {
1877 		block_end = block_start + blocksize;
1878 		if (block_end <= from || block_start >= to) {
1879 			if (!buffer_uptodate(bh))
1880 				partial = 1;
1881 		} else {
1882 			set_buffer_uptodate(bh);
1883 			mark_buffer_dirty(bh);
1884 		}
1885 		clear_buffer_new(bh);
1886 	}
1887 
1888 	/*
1889 	 * If this is a partial write which happened to make all buffers
1890 	 * uptodate then we can optimize away a bogus readpage() for
1891 	 * the next read(). Here we 'discover' whether the page went
1892 	 * uptodate as a result of this (potentially partial) write.
1893 	 */
1894 	if (!partial)
1895 		SetPageUptodate(page);
1896 	return 0;
1897 }
1898 
1899 /*
1900  * block_write_begin takes care of the basic task of block allocation and
1901  * bringing partial write blocks uptodate first.
1902  *
1903  * The filesystem needs to handle block truncation upon failure.
1904  */
1905 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1906 		unsigned flags, struct page **pagep, get_block_t *get_block)
1907 {
1908 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1909 	struct page *page;
1910 	int status;
1911 
1912 	page = grab_cache_page_write_begin(mapping, index, flags);
1913 	if (!page)
1914 		return -ENOMEM;
1915 
1916 	status = __block_write_begin(page, pos, len, get_block);
1917 	if (unlikely(status)) {
1918 		unlock_page(page);
1919 		page_cache_release(page);
1920 		page = NULL;
1921 	}
1922 
1923 	*pagep = page;
1924 	return status;
1925 }
1926 EXPORT_SYMBOL(block_write_begin);
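/*
 * Illustrative sketch (not part of this file): how a simple get_block
 * based filesystem might wire block_write_begin() into its ->write_begin
 * method.  The myfs_* names, including myfs_get_block(), are hypothetical.
 * On failure the filesystem must still truncate away any blocks it
 * instantiated beyond i_size.
 */
#if 0
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	/* grabs and locks the page, creates buffers, maps the blocks */
	return block_write_begin(mapping, pos, len, flags, pagep,
				 myfs_get_block);
}
#endif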
1927 
1928 int block_write_end(struct file *file, struct address_space *mapping,
1929 			loff_t pos, unsigned len, unsigned copied,
1930 			struct page *page, void *fsdata)
1931 {
1932 	struct inode *inode = mapping->host;
1933 	unsigned start;
1934 
1935 	start = pos & (PAGE_CACHE_SIZE - 1);
1936 
1937 	if (unlikely(copied < len)) {
1938 		/*
1939 		 * The buffers that were written will now be uptodate, so we
1940 		 * don't have to worry about a readpage reading them and
1941 		 * overwriting a partial write. However if we have encountered
1942 		 * a short write and only partially written into a buffer, it
1943 		 * will not be marked uptodate, so a readpage might come in and
1944 		 * destroy our partial write.
1945 		 *
1946 		 * Do the simplest thing, and just treat any short write to a
1947 		 * non uptodate page as a zero-length write, and force the
1948 		 * caller to redo the whole thing.
1949 		 */
1950 		if (!PageUptodate(page))
1951 			copied = 0;
1952 
1953 		page_zero_new_buffers(page, start+copied, start+len);
1954 	}
1955 	flush_dcache_page(page);
1956 
1957 	/* This could be a short (even 0-length) commit */
1958 	__block_commit_write(inode, page, start, start+copied);
1959 
1960 	return copied;
1961 }
1962 EXPORT_SYMBOL(block_write_end);
1963 
1964 int generic_write_end(struct file *file, struct address_space *mapping,
1965 			loff_t pos, unsigned len, unsigned copied,
1966 			struct page *page, void *fsdata)
1967 {
1968 	struct inode *inode = mapping->host;
1969 	int i_size_changed = 0;
1970 
1971 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1972 
1973 	/*
1974 	 * No need to use i_size_read() here, the i_size
1975 	 * cannot change under us because we hold i_mutex.
1976 	 *
1977 	 * But it's important to update i_size while still holding page lock:
1978 	 * page writeout could otherwise come in and zero beyond i_size.
1979 	 */
1980 	if (pos+copied > inode->i_size) {
1981 		i_size_write(inode, pos+copied);
1982 		i_size_changed = 1;
1983 	}
1984 
1985 	unlock_page(page);
1986 	page_cache_release(page);
1987 
1988 	/*
1989 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
1990 	 * makes the holding time of page lock longer. Second, it forces lock
1991 	 * ordering of page lock and transaction start for journaling
1992 	 * filesystems.
1993 	 */
1994 	if (i_size_changed)
1995 		mark_inode_dirty(inode);
1996 
1997 	return copied;
1998 }
1999 EXPORT_SYMBOL(generic_write_end);
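/*
 * Illustrative sketch (not part of this file): generic_write_end() is the
 * usual ->write_end partner of a block_write_begin() based ->write_begin,
 * as in the (hypothetical) address_space_operations below.
 */
#if 0
static const struct address_space_operations myfs_aops = {
	.write_begin	= myfs_write_begin,	/* see the sketch above */
	.write_end	= generic_write_end,
};
#endif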
2000 
2001 /*
2002  * block_is_partially_uptodate checks whether buffers within a page are
2003  * uptodate or not.
2004  *
2005  * Returns true if all buffers which correspond to the portion of the
2006  * file we want to read are uptodate.
2007  */
2008 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2009 					unsigned long from)
2010 {
2011 	struct inode *inode = page->mapping->host;
2012 	unsigned block_start, block_end, blocksize;
2013 	unsigned to;
2014 	struct buffer_head *bh, *head;
2015 	int ret = 1;
2016 
2017 	if (!page_has_buffers(page))
2018 		return 0;
2019 
2020 	blocksize = 1 << inode->i_blkbits;
2021 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2022 	to = from + to;
2023 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2024 		return 0;
2025 
2026 	head = page_buffers(page);
2027 	bh = head;
2028 	block_start = 0;
2029 	do {
2030 		block_end = block_start + blocksize;
2031 		if (block_end > from && block_start < to) {
2032 			if (!buffer_uptodate(bh)) {
2033 				ret = 0;
2034 				break;
2035 			}
2036 			if (block_end >= to)
2037 				break;
2038 		}
2039 		block_start = block_end;
2040 		bh = bh->b_this_page;
2041 	} while (bh != head);
2042 
2043 	return ret;
2044 }
2045 EXPORT_SYMBOL(block_is_partially_uptodate);
2046 
2047 /*
2048  * Generic "read page" function for block devices that have the normal
2049  * get_block functionality. This is most of the block device filesystems.
2050  * Reads the page asynchronously --- the unlock_buffer() and
2051  * set/clear_buffer_uptodate() functions propagate buffer state into the
2052  * page struct once IO has completed.
2053  */
2054 int block_read_full_page(struct page *page, get_block_t *get_block)
2055 {
2056 	struct inode *inode = page->mapping->host;
2057 	sector_t iblock, lblock;
2058 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2059 	unsigned int blocksize;
2060 	int nr, i;
2061 	int fully_mapped = 1;
2062 
2063 	BUG_ON(!PageLocked(page));
2064 	blocksize = 1 << inode->i_blkbits;
2065 	if (!page_has_buffers(page))
2066 		create_empty_buffers(page, blocksize, 0);
2067 	head = page_buffers(page);
2068 
2069 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2070 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2071 	bh = head;
2072 	nr = 0;
2073 	i = 0;
2074 
2075 	do {
2076 		if (buffer_uptodate(bh))
2077 			continue;
2078 
2079 		if (!buffer_mapped(bh)) {
2080 			int err = 0;
2081 
2082 			fully_mapped = 0;
2083 			if (iblock < lblock) {
2084 				WARN_ON(bh->b_size != blocksize);
2085 				err = get_block(inode, iblock, bh, 0);
2086 				if (err)
2087 					SetPageError(page);
2088 			}
2089 			if (!buffer_mapped(bh)) {
2090 				zero_user(page, i * blocksize, blocksize);
2091 				if (!err)
2092 					set_buffer_uptodate(bh);
2093 				continue;
2094 			}
2095 			/*
2096 			 * get_block() might have updated the buffer
2097 			 * synchronously
2098 			 */
2099 			if (buffer_uptodate(bh))
2100 				continue;
2101 		}
2102 		arr[nr++] = bh;
2103 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2104 
2105 	if (fully_mapped)
2106 		SetPageMappedToDisk(page);
2107 
2108 	if (!nr) {
2109 		/*
2110 		 * All buffers are uptodate - we can set the page uptodate
2111 		 * as well. But not if get_block() returned an error.
2112 		 */
2113 		if (!PageError(page))
2114 			SetPageUptodate(page);
2115 		unlock_page(page);
2116 		return 0;
2117 	}
2118 
2119 	/* Stage two: lock the buffers */
2120 	for (i = 0; i < nr; i++) {
2121 		bh = arr[i];
2122 		lock_buffer(bh);
2123 		mark_buffer_async_read(bh);
2124 	}
2125 
2126 	/*
2127 	 * Stage 3: start the IO.  Check for uptodateness
2128 	 * inside the buffer lock in case another process reading
2129 	 * the underlying blockdev brought it uptodate (the sct fix).
2130 	 */
2131 	for (i = 0; i < nr; i++) {
2132 		bh = arr[i];
2133 		if (buffer_uptodate(bh))
2134 			end_buffer_async_read(bh, 1);
2135 		else
2136 			submit_bh(READ, bh);
2137 	}
2138 	return 0;
2139 }
2140 EXPORT_SYMBOL(block_read_full_page);
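/*
 * Illustrative sketch (not part of this file): a typical ->readpage just
 * forwards to block_read_full_page() with the filesystem's block mapping
 * routine.  myfs_get_block() is a hypothetical name.
 */
#if 0
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}
#endif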
2141 
2142 /* utility function for filesystems that need to do work on expanding
2143  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2144  * deal with the hole.
2145  */
2146 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2147 {
2148 	struct address_space *mapping = inode->i_mapping;
2149 	struct page *page;
2150 	void *fsdata;
2151 	int err;
2152 
2153 	err = inode_newsize_ok(inode, size);
2154 	if (err)
2155 		goto out;
2156 
2157 	err = pagecache_write_begin(NULL, mapping, size, 0,
2158 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2159 				&page, &fsdata);
2160 	if (err)
2161 		goto out;
2162 
2163 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2164 	BUG_ON(err > 0);
2165 
2166 out:
2167 	return err;
2168 }
2169 EXPORT_SYMBOL(generic_cont_expand_simple);
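/*
 * Illustrative sketch (not part of this file): an expanding truncate in a
 * filesystem's ->setattr path could use generic_cont_expand_simple() to
 * push i_size out through the page cache.  The myfs_* name is hypothetical
 * and locking/timestamp details are reduced to the bare minimum.
 */
#if 0
static int myfs_expand(struct inode *inode, loff_t newsize)
{
	int err;

	err = generic_cont_expand_simple(inode, newsize);
	if (err)
		return err;
	/* i_size has been pushed out; just refresh the timestamps */
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	return 0;
}
#endif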
2170 
2171 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2172 			    loff_t pos, loff_t *bytes)
2173 {
2174 	struct inode *inode = mapping->host;
2175 	unsigned blocksize = 1 << inode->i_blkbits;
2176 	struct page *page;
2177 	void *fsdata;
2178 	pgoff_t index, curidx;
2179 	loff_t curpos;
2180 	unsigned zerofrom, offset, len;
2181 	int err = 0;
2182 
2183 	index = pos >> PAGE_CACHE_SHIFT;
2184 	offset = pos & ~PAGE_CACHE_MASK;
2185 
2186 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2187 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2188 		if (zerofrom & (blocksize-1)) {
2189 			*bytes |= (blocksize-1);
2190 			(*bytes)++;
2191 		}
2192 		len = PAGE_CACHE_SIZE - zerofrom;
2193 
2194 		err = pagecache_write_begin(file, mapping, curpos, len,
2195 						AOP_FLAG_UNINTERRUPTIBLE,
2196 						&page, &fsdata);
2197 		if (err)
2198 			goto out;
2199 		zero_user(page, zerofrom, len);
2200 		err = pagecache_write_end(file, mapping, curpos, len, len,
2201 						page, fsdata);
2202 		if (err < 0)
2203 			goto out;
2204 		BUG_ON(err != len);
2205 		err = 0;
2206 
2207 		balance_dirty_pages_ratelimited(mapping);
2208 	}
2209 
2210 	/* page covers the boundary, find the boundary offset */
2211 	if (index == curidx) {
2212 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2213 		/* if we are expanding the file, the last block will be filled */
2214 		if (offset <= zerofrom) {
2215 			goto out;
2216 		}
2217 		if (zerofrom & (blocksize-1)) {
2218 			*bytes |= (blocksize-1);
2219 			(*bytes)++;
2220 		}
2221 		len = offset - zerofrom;
2222 
2223 		err = pagecache_write_begin(file, mapping, curpos, len,
2224 						AOP_FLAG_UNINTERRUPTIBLE,
2225 						&page, &fsdata);
2226 		if (err)
2227 			goto out;
2228 		zero_user(page, zerofrom, len);
2229 		err = pagecache_write_end(file, mapping, curpos, len, len,
2230 						page, fsdata);
2231 		if (err < 0)
2232 			goto out;
2233 		BUG_ON(err != len);
2234 		err = 0;
2235 	}
2236 out:
2237 	return err;
2238 }
2239 
2240 /*
2241  * For moronic filesystems that do not allow holes in files.
2242  * We may have to extend the file.
2243  */
2244 int cont_write_begin(struct file *file, struct address_space *mapping,
2245 			loff_t pos, unsigned len, unsigned flags,
2246 			struct page **pagep, void **fsdata,
2247 			get_block_t *get_block, loff_t *bytes)
2248 {
2249 	struct inode *inode = mapping->host;
2250 	unsigned blocksize = 1 << inode->i_blkbits;
2251 	unsigned zerofrom;
2252 	int err;
2253 
2254 	err = cont_expand_zero(file, mapping, pos, bytes);
2255 	if (err)
2256 		return err;
2257 
2258 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
2259 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2260 		*bytes |= (blocksize-1);
2261 		(*bytes)++;
2262 	}
2263 
2264 	return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2265 }
2266 EXPORT_SYMBOL(cont_write_begin);
2267 
2268 int block_commit_write(struct page *page, unsigned from, unsigned to)
2269 {
2270 	struct inode *inode = page->mapping->host;
2271 	__block_commit_write(inode,page,from,to);
2272 	return 0;
2273 }
2274 EXPORT_SYMBOL(block_commit_write);
2275 
2276 /*
2277  * block_page_mkwrite() is not allowed to change the file size as it gets
2278  * called from a page fault handler when a page is first dirtied. Hence we must
2279  * be careful to check for EOF conditions here. We set the page up correctly
2280  * for a written page which means we get ENOSPC checking when writing into
2281  * holes and correct delalloc and unwritten extent mapping on filesystems that
2282  * support these features.
2283  *
2284  * We are not allowed to take the i_mutex here so we have to play games to
2285  * protect against truncate races as the page could now be beyond EOF.  Because
2286  * truncate writes the inode size before removing pages, once we have the
2287  * page lock we can determine safely if the page is beyond EOF. If it is not
2288  * beyond EOF, then the page is guaranteed safe against truncation until we
2289  * unlock the page.
2290  *
2291  * Direct callers of this function should call vfs_check_frozen() so that the
2292  * page fault handler does not busy-loop until the fs is thawed.
2293  */
2294 int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2295 			 get_block_t get_block)
2296 {
2297 	struct page *page = vmf->page;
2298 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2299 	unsigned long end;
2300 	loff_t size;
2301 	int ret;
2302 
2303 	lock_page(page);
2304 	size = i_size_read(inode);
2305 	if ((page->mapping != inode->i_mapping) ||
2306 	    (page_offset(page) > size)) {
2307 		/* We overload EFAULT to mean page got truncated */
2308 		ret = -EFAULT;
2309 		goto out_unlock;
2310 	}
2311 
2312 	/* page is wholly or partially inside EOF */
2313 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2314 		end = size & ~PAGE_CACHE_MASK;
2315 	else
2316 		end = PAGE_CACHE_SIZE;
2317 
2318 	ret = __block_write_begin(page, 0, end, get_block);
2319 	if (!ret)
2320 		ret = block_commit_write(page, 0, end);
2321 
2322 	if (unlikely(ret < 0))
2323 		goto out_unlock;
2324 	/*
2325 	 * Freezing in progress? We check after the page is marked dirty and
2326 	 * with page lock held so if the test here fails, we are sure freezing
2327 	 * code will wait during syncing until the page fault is done - at that
2328 	 * point page will be dirty and unlocked so freezing code will write it
2329 	 * and writeprotect it again.
2330 	 */
2331 	set_page_dirty(page);
2332 	if (inode->i_sb->s_frozen != SB_UNFROZEN) {
2333 		ret = -EAGAIN;
2334 		goto out_unlock;
2335 	}
2336 	wait_on_page_writeback(page);
2337 	return 0;
2338 out_unlock:
2339 	unlock_page(page);
2340 	return ret;
2341 }
2342 EXPORT_SYMBOL(__block_page_mkwrite);
2343 
2344 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2345 		   get_block_t get_block)
2346 {
2347 	int ret;
2348 	struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
2349 
2350 	/*
2351 	 * This check is racy but catches the common case. The check in
2352 	 * __block_page_mkwrite() is reliable.
2353 	 */
2354 	vfs_check_frozen(sb, SB_FREEZE_WRITE);
2355 	ret = __block_page_mkwrite(vma, vmf, get_block);
2356 	return block_page_mkwrite_return(ret);
2357 }
2358 EXPORT_SYMBOL(block_page_mkwrite);
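/*
 * Illustrative sketch (not part of this file): a filesystem whose block
 * mapping is expressed as a get_block routine can service write faults by
 * routing ->page_mkwrite to block_page_mkwrite().  The myfs_* names are
 * hypothetical.
 */
#if 0
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};
#endif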
2359 
2360 /*
2361  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2362  * immediately, while under the page lock.  So it needs a special end_io
2363  * handler which does not touch the bh after unlocking it.
2364  */
2365 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2366 {
2367 	__end_buffer_read_notouch(bh, uptodate);
2368 }
2369 
2370 /*
2371  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2372  * the page (converting it to circular linked list and taking care of page
2373  * dirty races).
2374  */
2375 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2376 {
2377 	struct buffer_head *bh;
2378 
2379 	BUG_ON(!PageLocked(page));
2380 
2381 	spin_lock(&page->mapping->private_lock);
2382 	bh = head;
2383 	do {
2384 		if (PageDirty(page))
2385 			set_buffer_dirty(bh);
2386 		if (!bh->b_this_page)
2387 			bh->b_this_page = head;
2388 		bh = bh->b_this_page;
2389 	} while (bh != head);
2390 	attach_page_buffers(page, head);
2391 	spin_unlock(&page->mapping->private_lock);
2392 }
2393 
2394 /*
2395  * On entry, the page is fully not uptodate.
2396  * On exit the page is fully uptodate in the areas outside (from,to)
2397  * The filesystem needs to handle block truncation upon failure.
2398  */
2399 int nobh_write_begin(struct address_space *mapping,
2400 			loff_t pos, unsigned len, unsigned flags,
2401 			struct page **pagep, void **fsdata,
2402 			get_block_t *get_block)
2403 {
2404 	struct inode *inode = mapping->host;
2405 	const unsigned blkbits = inode->i_blkbits;
2406 	const unsigned blocksize = 1 << blkbits;
2407 	struct buffer_head *head, *bh;
2408 	struct page *page;
2409 	pgoff_t index;
2410 	unsigned from, to;
2411 	unsigned block_in_page;
2412 	unsigned block_start, block_end;
2413 	sector_t block_in_file;
2414 	int nr_reads = 0;
2415 	int ret = 0;
2416 	int is_mapped_to_disk = 1;
2417 
2418 	index = pos >> PAGE_CACHE_SHIFT;
2419 	from = pos & (PAGE_CACHE_SIZE - 1);
2420 	to = from + len;
2421 
2422 	page = grab_cache_page_write_begin(mapping, index, flags);
2423 	if (!page)
2424 		return -ENOMEM;
2425 	*pagep = page;
2426 	*fsdata = NULL;
2427 
2428 	if (page_has_buffers(page)) {
2429 		ret = __block_write_begin(page, pos, len, get_block);
2430 		if (unlikely(ret))
2431 			goto out_release;
2432 		return ret;
2433 	}
2434 
2435 	if (PageMappedToDisk(page))
2436 		return 0;
2437 
2438 	/*
2439 	 * Allocate buffers so that we can keep track of state, and potentially
2440 	 * attach them to the page if an error occurs. In the common case of
2441 	 * no error, they will just be freed again without ever being attached
2442 	 * to the page (which is all OK, because we're under the page lock).
2443 	 *
2444 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2445 	 * than the circular one we're used to.
2446 	 */
2447 	head = alloc_page_buffers(page, blocksize, 0);
2448 	if (!head) {
2449 		ret = -ENOMEM;
2450 		goto out_release;
2451 	}
2452 
2453 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2454 
2455 	/*
2456 	 * We loop across all blocks in the page, whether or not they are
2457 	 * part of the affected region.  This is so we can discover if the
2458 	 * page is fully mapped-to-disk.
2459 	 */
2460 	for (block_start = 0, block_in_page = 0, bh = head;
2461 		  block_start < PAGE_CACHE_SIZE;
2462 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2463 		int create;
2464 
2465 		block_end = block_start + blocksize;
2466 		bh->b_state = 0;
2467 		create = 1;
2468 		if (block_start >= to)
2469 			create = 0;
2470 		ret = get_block(inode, block_in_file + block_in_page,
2471 					bh, create);
2472 		if (ret)
2473 			goto failed;
2474 		if (!buffer_mapped(bh))
2475 			is_mapped_to_disk = 0;
2476 		if (buffer_new(bh))
2477 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2478 		if (PageUptodate(page)) {
2479 			set_buffer_uptodate(bh);
2480 			continue;
2481 		}
2482 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2483 			zero_user_segments(page, block_start, from,
2484 							to, block_end);
2485 			continue;
2486 		}
2487 		if (buffer_uptodate(bh))
2488 			continue;	/* reiserfs does this */
2489 		if (block_start < from || block_end > to) {
2490 			lock_buffer(bh);
2491 			bh->b_end_io = end_buffer_read_nobh;
2492 			submit_bh(READ, bh);
2493 			nr_reads++;
2494 		}
2495 	}
2496 
2497 	if (nr_reads) {
2498 		/*
2499 		 * The page is locked, so these buffers are protected from
2500 		 * any VM or truncate activity.  Hence we don't need to care
2501 		 * for the buffer_head refcounts.
2502 		 */
2503 		for (bh = head; bh; bh = bh->b_this_page) {
2504 			wait_on_buffer(bh);
2505 			if (!buffer_uptodate(bh))
2506 				ret = -EIO;
2507 		}
2508 		if (ret)
2509 			goto failed;
2510 	}
2511 
2512 	if (is_mapped_to_disk)
2513 		SetPageMappedToDisk(page);
2514 
2515 	*fsdata = head; /* to be released by nobh_write_end */
2516 
2517 	return 0;
2518 
2519 failed:
2520 	BUG_ON(!ret);
2521 	/*
2522 	 * Error recovery is a bit difficult. We need to zero out blocks that
2523 	 * were newly allocated, and dirty them to ensure they get written out.
2524 	 * Buffers need to be attached to the page at this point, otherwise
2525 	 * the handling of potential IO errors during writeout would be hard
2526 	 * (could try doing synchronous writeout, but what if that fails too?)
2527 	 */
2528 	attach_nobh_buffers(page, head);
2529 	page_zero_new_buffers(page, from, to);
2530 
2531 out_release:
2532 	unlock_page(page);
2533 	page_cache_release(page);
2534 	*pagep = NULL;
2535 
2536 	return ret;
2537 }
2538 EXPORT_SYMBOL(nobh_write_begin);
2539 
2540 int nobh_write_end(struct file *file, struct address_space *mapping,
2541 			loff_t pos, unsigned len, unsigned copied,
2542 			struct page *page, void *fsdata)
2543 {
2544 	struct inode *inode = page->mapping->host;
2545 	struct buffer_head *head = fsdata;
2546 	struct buffer_head *bh;
2547 	BUG_ON(fsdata != NULL && page_has_buffers(page));
2548 
2549 	if (unlikely(copied < len) && head)
2550 		attach_nobh_buffers(page, head);
2551 	if (page_has_buffers(page))
2552 		return generic_write_end(file, mapping, pos, len,
2553 					copied, page, fsdata);
2554 
2555 	SetPageUptodate(page);
2556 	set_page_dirty(page);
2557 	if (pos+copied > inode->i_size) {
2558 		i_size_write(inode, pos+copied);
2559 		mark_inode_dirty(inode);
2560 	}
2561 
2562 	unlock_page(page);
2563 	page_cache_release(page);
2564 
2565 	while (head) {
2566 		bh = head;
2567 		head = head->b_this_page;
2568 		free_buffer_head(bh);
2569 	}
2570 
2571 	return copied;
2572 }
2573 EXPORT_SYMBOL(nobh_write_end);
2574 
2575 /*
2576  * nobh_writepage() - based on block_write_full_page() except
2577  * that it tries to operate without attaching buffer_heads to
2578  * the page.
2579  */
2580 int nobh_writepage(struct page *page, get_block_t *get_block,
2581 			struct writeback_control *wbc)
2582 {
2583 	struct inode * const inode = page->mapping->host;
2584 	loff_t i_size = i_size_read(inode);
2585 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2586 	unsigned offset;
2587 	int ret;
2588 
2589 	/* Is the page fully inside i_size? */
2590 	if (page->index < end_index)
2591 		goto out;
2592 
2593 	/* Is the page fully outside i_size? (truncate in progress) */
2594 	offset = i_size & (PAGE_CACHE_SIZE-1);
2595 	if (page->index >= end_index+1 || !offset) {
2596 		/*
2597 		 * The page may have dirty, unmapped buffers.  For example,
2598 		 * they may have been added in ext3_writepage().  Make them
2599 		 * freeable here, so the page does not leak.
2600 		 */
2601 #if 0
2602 		/* Not really sure about this  - do we need this ? */
2603 		if (page->mapping->a_ops->invalidatepage)
2604 			page->mapping->a_ops->invalidatepage(page, offset);
2605 #endif
2606 		unlock_page(page);
2607 		return 0; /* don't care */
2608 	}
2609 
2610 	/*
2611 	 * The page straddles i_size.  It must be zeroed out on each and every
2612 	 * writepage invocation because it may be mmapped.  "A file is mapped
2613 	 * in multiples of the page size.  For a file that is not a multiple of
2614 	 * the  page size, the remaining memory is zeroed when mapped, and
2615 	 * writes to that region are not written out to the file."
2616 	 */
2617 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2618 out:
2619 	ret = mpage_writepage(page, get_block, wbc);
2620 	if (ret == -EAGAIN)
2621 		ret = __block_write_full_page(inode, page, get_block, wbc,
2622 					      end_buffer_async_write);
2623 	return ret;
2624 }
2625 EXPORT_SYMBOL(nobh_writepage);
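/*
 * Illustrative sketch (not part of this file): the nobh_* helpers are
 * meant to be used together.  A filesystem opting out of per-page
 * buffer_heads could wire them up roughly as below; the myfs_* names are
 * hypothetical.
 */
#if 0
static int myfs_nobh_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, void **fsdata)
{
	return nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block);
}

static int myfs_nobh_writepage(struct page *page,
		struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static const struct address_space_operations myfs_nobh_aops = {
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,
	.writepage	= myfs_nobh_writepage,
};
#endif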
2626 
2627 int nobh_truncate_page(struct address_space *mapping,
2628 			loff_t from, get_block_t *get_block)
2629 {
2630 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2631 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2632 	unsigned blocksize;
2633 	sector_t iblock;
2634 	unsigned length, pos;
2635 	struct inode *inode = mapping->host;
2636 	struct page *page;
2637 	struct buffer_head map_bh;
2638 	int err;
2639 
2640 	blocksize = 1 << inode->i_blkbits;
2641 	length = offset & (blocksize - 1);
2642 
2643 	/* Block boundary? Nothing to do */
2644 	if (!length)
2645 		return 0;
2646 
2647 	length = blocksize - length;
2648 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2649 
2650 	page = grab_cache_page(mapping, index);
2651 	err = -ENOMEM;
2652 	if (!page)
2653 		goto out;
2654 
2655 	if (page_has_buffers(page)) {
2656 has_buffers:
2657 		unlock_page(page);
2658 		page_cache_release(page);
2659 		return block_truncate_page(mapping, from, get_block);
2660 	}
2661 
2662 	/* Find the buffer that contains "offset" */
2663 	pos = blocksize;
2664 	while (offset >= pos) {
2665 		iblock++;
2666 		pos += blocksize;
2667 	}
2668 
2669 	map_bh.b_size = blocksize;
2670 	map_bh.b_state = 0;
2671 	err = get_block(inode, iblock, &map_bh, 0);
2672 	if (err)
2673 		goto unlock;
2674 	/* unmapped? It's a hole - nothing to do */
2675 	if (!buffer_mapped(&map_bh))
2676 		goto unlock;
2677 
2678 	/* Ok, it's mapped. Make sure it's up-to-date */
2679 	if (!PageUptodate(page)) {
2680 		err = mapping->a_ops->readpage(NULL, page);
2681 		if (err) {
2682 			page_cache_release(page);
2683 			goto out;
2684 		}
2685 		lock_page(page);
2686 		if (!PageUptodate(page)) {
2687 			err = -EIO;
2688 			goto unlock;
2689 		}
2690 		if (page_has_buffers(page))
2691 			goto has_buffers;
2692 	}
2693 	zero_user(page, offset, length);
2694 	set_page_dirty(page);
2695 	err = 0;
2696 
2697 unlock:
2698 	unlock_page(page);
2699 	page_cache_release(page);
2700 out:
2701 	return err;
2702 }
2703 EXPORT_SYMBOL(nobh_truncate_page);
2704 
2705 int block_truncate_page(struct address_space *mapping,
2706 			loff_t from, get_block_t *get_block)
2707 {
2708 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2709 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2710 	unsigned blocksize;
2711 	sector_t iblock;
2712 	unsigned length, pos;
2713 	struct inode *inode = mapping->host;
2714 	struct page *page;
2715 	struct buffer_head *bh;
2716 	int err;
2717 
2718 	blocksize = 1 << inode->i_blkbits;
2719 	length = offset & (blocksize - 1);
2720 
2721 	/* Block boundary? Nothing to do */
2722 	if (!length)
2723 		return 0;
2724 
2725 	length = blocksize - length;
2726 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2727 
2728 	page = grab_cache_page(mapping, index);
2729 	err = -ENOMEM;
2730 	if (!page)
2731 		goto out;
2732 
2733 	if (!page_has_buffers(page))
2734 		create_empty_buffers(page, blocksize, 0);
2735 
2736 	/* Find the buffer that contains "offset" */
2737 	bh = page_buffers(page);
2738 	pos = blocksize;
2739 	while (offset >= pos) {
2740 		bh = bh->b_this_page;
2741 		iblock++;
2742 		pos += blocksize;
2743 	}
2744 
2745 	err = 0;
2746 	if (!buffer_mapped(bh)) {
2747 		WARN_ON(bh->b_size != blocksize);
2748 		err = get_block(inode, iblock, bh, 0);
2749 		if (err)
2750 			goto unlock;
2751 		/* unmapped? It's a hole - nothing to do */
2752 		if (!buffer_mapped(bh))
2753 			goto unlock;
2754 	}
2755 
2756 	/* Ok, it's mapped. Make sure it's up-to-date */
2757 	if (PageUptodate(page))
2758 		set_buffer_uptodate(bh);
2759 
2760 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2761 		err = -EIO;
2762 		ll_rw_block(READ, 1, &bh);
2763 		wait_on_buffer(bh);
2764 		/* Uhhuh. Read error. Complain and punt. */
2765 		if (!buffer_uptodate(bh))
2766 			goto unlock;
2767 	}
2768 
2769 	zero_user(page, offset, length);
2770 	mark_buffer_dirty(bh);
2771 	err = 0;
2772 
2773 unlock:
2774 	unlock_page(page);
2775 	page_cache_release(page);
2776 out:
2777 	return err;
2778 }
2779 EXPORT_SYMBOL(block_truncate_page);
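/*
 * Illustrative sketch (not part of this file): a shrinking truncate would
 * typically zero the partial tail block with block_truncate_page() before
 * trimming the block mapping.  The myfs_* names are hypothetical.
 */
#if 0
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* filesystem-specific freeing of blocks beyond i_size would follow */
}
#endif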
2780 
2781 /*
2782  * The generic ->writepage function for buffer-backed address_spaces
2783  * this form passes in the end_io handler used to finish the IO.
2784  */
2785 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2786 			struct writeback_control *wbc, bh_end_io_t *handler)
2787 {
2788 	struct inode * const inode = page->mapping->host;
2789 	loff_t i_size = i_size_read(inode);
2790 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2791 	unsigned offset;
2792 
2793 	/* Is the page fully inside i_size? */
2794 	if (page->index < end_index)
2795 		return __block_write_full_page(inode, page, get_block, wbc,
2796 					       handler);
2797 
2798 	/* Is the page fully outside i_size? (truncate in progress) */
2799 	offset = i_size & (PAGE_CACHE_SIZE-1);
2800 	if (page->index >= end_index+1 || !offset) {
2801 		/*
2802 		 * The page may have dirty, unmapped buffers.  For example,
2803 		 * they may have been added in ext3_writepage().  Make them
2804 		 * freeable here, so the page does not leak.
2805 		 */
2806 		do_invalidatepage(page, 0);
2807 		unlock_page(page);
2808 		return 0; /* don't care */
2809 	}
2810 
2811 	/*
2812 	 * The page straddles i_size.  It must be zeroed out on each and every
2813 	 * writepage invocation because it may be mmapped.  "A file is mapped
2814 	 * in multiples of the page size.  For a file that is not a multiple of
2815 	 * the  page size, the remaining memory is zeroed when mapped, and
2816 	 * writes to that region are not written out to the file."
2817 	 */
2818 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2819 	return __block_write_full_page(inode, page, get_block, wbc, handler);
2820 }
2821 EXPORT_SYMBOL(block_write_full_page_endio);
2822 
2823 /*
2824  * The generic ->writepage function for buffer-backed address_spaces
2825  */
2826 int block_write_full_page(struct page *page, get_block_t *get_block,
2827 			struct writeback_control *wbc)
2828 {
2829 	return block_write_full_page_endio(page, get_block, wbc,
2830 					   end_buffer_async_write);
2831 }
2832 EXPORT_SYMBOL(block_write_full_page);
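/*
 * Illustrative sketch (not part of this file): the usual ->writepage of a
 * get_block based filesystem is a thin wrapper around
 * block_write_full_page().  myfs_get_block() is a hypothetical name.
 */
#if 0
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
#endif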
2833 
2834 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2835 			    get_block_t *get_block)
2836 {
2837 	struct buffer_head tmp;
2838 	struct inode *inode = mapping->host;
2839 	tmp.b_state = 0;
2840 	tmp.b_blocknr = 0;
2841 	tmp.b_size = 1 << inode->i_blkbits;
2842 	get_block(inode, block, &tmp, 0);
2843 	return tmp.b_blocknr;
2844 }
2845 EXPORT_SYMBOL(generic_block_bmap);
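/*
 * Illustrative sketch (not part of this file): ->bmap for a get_block
 * based filesystem is normally just generic_block_bmap() with the
 * filesystem's (hypothetical) myfs_get_block.
 */
#if 0
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif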
2846 
2847 static void end_bio_bh_io_sync(struct bio *bio, int err)
2848 {
2849 	struct buffer_head *bh = bio->bi_private;
2850 
2851 	if (err == -EOPNOTSUPP) {
2852 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2853 	}
2854 
2855 	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2856 		set_bit(BH_Quiet, &bh->b_state);
2857 
2858 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2859 	bio_put(bio);
2860 }
2861 
2862 int submit_bh(int rw, struct buffer_head * bh)
2863 {
2864 	struct bio *bio;
2865 	int ret = 0;
2866 
2867 	BUG_ON(!buffer_locked(bh));
2868 	BUG_ON(!buffer_mapped(bh));
2869 	BUG_ON(!bh->b_end_io);
2870 	BUG_ON(buffer_delay(bh));
2871 	BUG_ON(buffer_unwritten(bh));
2872 
2873 	/*
2874 	 * Only clear out a write error when rewriting
2875 	 */
2876 	if (test_set_buffer_req(bh) && (rw & WRITE))
2877 		clear_buffer_write_io_error(bh);
2878 
2879 	/*
2880 	 * from here on down, it's all bio -- do the initial mapping,
2881 	 * submit_bio -> generic_make_request may further map this bio around
2882 	 */
2883 	bio = bio_alloc(GFP_NOIO, 1);
2884 
2885 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2886 	bio->bi_bdev = bh->b_bdev;
2887 	bio->bi_io_vec[0].bv_page = bh->b_page;
2888 	bio->bi_io_vec[0].bv_len = bh->b_size;
2889 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2890 
2891 	bio->bi_vcnt = 1;
2892 	bio->bi_idx = 0;
2893 	bio->bi_size = bh->b_size;
2894 
2895 	bio->bi_end_io = end_bio_bh_io_sync;
2896 	bio->bi_private = bh;
2897 
2898 	bio_get(bio);
2899 	submit_bio(rw, bio);
2900 
2901 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2902 		ret = -EOPNOTSUPP;
2903 
2904 	bio_put(bio);
2905 	return ret;
2906 }
2907 EXPORT_SYMBOL(submit_bh);
2908 
2909 /**
2910  * ll_rw_block: low-level access to block devices (DEPRECATED)
2911  * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2912  * @nr: number of &struct buffer_heads in the array
2913  * @bhs: array of pointers to &struct buffer_head
2914  *
2915  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2916  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2917  * %READA option is described in the documentation for generic_make_request()
2918  * which ll_rw_block() calls.
2919  *
2920  * This function drops any buffer that it cannot get a lock on (with the
2921  * BH_Lock state bit), any buffer that appears to be clean when doing a write
2922  * request, and any buffer that appears to be up-to-date when doing a read
2923  * request.  Further it marks as clean buffers that are processed for
2924  * writing (the buffer cache won't assume that they are actually clean
2925  * until the buffer gets unlocked).
2926  *
2927  * ll_rw_block sets b_end_io to a simple completion handler that marks
2928  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2929  * any waiters.
2930  *
2931  * All of the buffers must be for the same device, and must also be a
2932  * multiple of the current approved size for the device.
2933  */
2934 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2935 {
2936 	int i;
2937 
2938 	for (i = 0; i < nr; i++) {
2939 		struct buffer_head *bh = bhs[i];
2940 
2941 		if (!trylock_buffer(bh))
2942 			continue;
2943 		if (rw == WRITE) {
2944 			if (test_clear_buffer_dirty(bh)) {
2945 				bh->b_end_io = end_buffer_write_sync;
2946 				get_bh(bh);
2947 				submit_bh(WRITE, bh);
2948 				continue;
2949 			}
2950 		} else {
2951 			if (!buffer_uptodate(bh)) {
2952 				bh->b_end_io = end_buffer_read_sync;
2953 				get_bh(bh);
2954 				submit_bh(rw, bh);
2955 				continue;
2956 			}
2957 		}
2958 		unlock_buffer(bh);
2959 	}
2960 }
2961 EXPORT_SYMBOL(ll_rw_block);
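/*
 * Illustrative sketch (not part of this file): a caller that needs one
 * buffer now but expects to need its neighbours soon can batch the
 * submission through ll_rw_block() and wait only on the buffer it
 * actually requires.  "bhs" and "nr" are assumed to be set up by the
 * caller; the extra buffers become readahead.
 */
#if 0
static int myfs_read_first_of(struct buffer_head *bhs[], int nr)
{
	ll_rw_block(READ, nr, bhs);
	wait_on_buffer(bhs[0]);
	if (!buffer_uptodate(bhs[0]))
		return -EIO;
	return 0;
}
#endif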
2962 
2963 void write_dirty_buffer(struct buffer_head *bh, int rw)
2964 {
2965 	lock_buffer(bh);
2966 	if (!test_clear_buffer_dirty(bh)) {
2967 		unlock_buffer(bh);
2968 		return;
2969 	}
2970 	bh->b_end_io = end_buffer_write_sync;
2971 	get_bh(bh);
2972 	submit_bh(rw, bh);
2973 }
2974 EXPORT_SYMBOL(write_dirty_buffer);
2975 
2976 /*
2977  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2978  * and then start new I/O and then wait upon it.  The caller must have a ref on
2979  * the buffer_head.
2980  */
2981 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
2982 {
2983 	int ret = 0;
2984 
2985 	WARN_ON(atomic_read(&bh->b_count) < 1);
2986 	lock_buffer(bh);
2987 	if (test_clear_buffer_dirty(bh)) {
2988 		get_bh(bh);
2989 		bh->b_end_io = end_buffer_write_sync;
2990 		ret = submit_bh(rw, bh);
2991 		wait_on_buffer(bh);
2992 		if (!ret && !buffer_uptodate(bh))
2993 			ret = -EIO;
2994 	} else {
2995 		unlock_buffer(bh);
2996 	}
2997 	return ret;
2998 }
2999 EXPORT_SYMBOL(__sync_dirty_buffer);
3000 
3001 int sync_dirty_buffer(struct buffer_head *bh)
3002 {
3003 	return __sync_dirty_buffer(bh, WRITE_SYNC);
3004 }
3005 EXPORT_SYMBOL(sync_dirty_buffer);
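/*
 * Illustrative sketch (not part of this file): synchronously updating a
 * single metadata block.  "bh" is assumed to be mapped, len is assumed to
 * be <= bh->b_size, and the caller must hold a reference on the buffer.
 */
#if 0
static int myfs_update_block(struct buffer_head *bh, const void *data,
			     size_t len)
{
	memcpy(bh->b_data, data, len);
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* waits for the write to complete */
}
#endif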
3006 
3007 /*
3008  * try_to_free_buffers() checks if all the buffers on this particular page
3009  * are unused, and releases them if so.
3010  *
3011  * Exclusion against try_to_free_buffers may be obtained by either
3012  * locking the page or by holding its mapping's private_lock.
3013  *
3014  * If the page is dirty but all the buffers are clean then we need to
3015  * be sure to mark the page clean as well.  This is because the page
3016  * may be against a block device, and a later reattachment of buffers
3017  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3018  * filesystem data on the same device.
3019  *
3020  * The same applies to regular filesystem pages: if all the buffers are
3021  * clean then we set the page clean and proceed.  To do that, we require
3022  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3023  * private_lock.
3024  *
3025  * try_to_free_buffers() is non-blocking.
3026  */
3027 static inline int buffer_busy(struct buffer_head *bh)
3028 {
3029 	return atomic_read(&bh->b_count) |
3030 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3031 }
3032 
3033 static int
3034 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3035 {
3036 	struct buffer_head *head = page_buffers(page);
3037 	struct buffer_head *bh;
3038 
3039 	bh = head;
3040 	do {
3041 		if (buffer_write_io_error(bh) && page->mapping)
3042 			set_bit(AS_EIO, &page->mapping->flags);
3043 		if (buffer_busy(bh))
3044 			goto failed;
3045 		bh = bh->b_this_page;
3046 	} while (bh != head);
3047 
3048 	do {
3049 		struct buffer_head *next = bh->b_this_page;
3050 
3051 		if (bh->b_assoc_map)
3052 			__remove_assoc_queue(bh);
3053 		bh = next;
3054 	} while (bh != head);
3055 	*buffers_to_free = head;
3056 	__clear_page_buffers(page);
3057 	return 1;
3058 failed:
3059 	return 0;
3060 }
3061 
3062 int try_to_free_buffers(struct page *page)
3063 {
3064 	struct address_space * const mapping = page->mapping;
3065 	struct buffer_head *buffers_to_free = NULL;
3066 	int ret = 0;
3067 
3068 	BUG_ON(!PageLocked(page));
3069 	if (PageWriteback(page))
3070 		return 0;
3071 
3072 	if (mapping == NULL) {		/* can this still happen? */
3073 		ret = drop_buffers(page, &buffers_to_free);
3074 		goto out;
3075 	}
3076 
3077 	spin_lock(&mapping->private_lock);
3078 	ret = drop_buffers(page, &buffers_to_free);
3079 
3080 	/*
3081 	 * If the filesystem writes its buffers by hand (eg ext3)
3082 	 * then we can have clean buffers against a dirty page.  We
3083 	 * clean the page here; otherwise the VM will never notice
3084 	 * that the filesystem did any IO at all.
3085 	 *
3086 	 * Also, during truncate, discard_buffer will have marked all
3087 	 * the page's buffers clean.  We discover that here and clean
3088 	 * the page also.
3089 	 *
3090 	 * private_lock must be held over this entire operation in order
3091 	 * to synchronise against __set_page_dirty_buffers and prevent the
3092 	 * dirty bit from being lost.
3093 	 */
3094 	if (ret)
3095 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
3096 	spin_unlock(&mapping->private_lock);
3097 out:
3098 	if (buffers_to_free) {
3099 		struct buffer_head *bh = buffers_to_free;
3100 
3101 		do {
3102 			struct buffer_head *next = bh->b_this_page;
3103 			free_buffer_head(bh);
3104 			bh = next;
3105 		} while (bh != buffers_to_free);
3106 	}
3107 	return ret;
3108 }
3109 EXPORT_SYMBOL(try_to_free_buffers);
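/*
 * Illustrative sketch (not part of this file): a filesystem with no extra
 * per-page state can leave ->releasepage unset (the VM then falls back to
 * try_to_free_buffers() itself) or wrap it as below.  myfs_releasepage is
 * a hypothetical name.
 */
#if 0
static int myfs_releasepage(struct page *page, gfp_t gfp)
{
	/* nothing filesystem-specific is pinned on this page */
	return try_to_free_buffers(page);
}
#endif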
3110 
3111 /*
3112  * There are no bdflush tunables left.  But distributions are
3113  * still running obsolete flush daemons, so we terminate them here.
3114  *
3115  * Use of bdflush() is deprecated and will be removed in a future kernel.
3116  * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3117  */
3118 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3119 {
3120 	static int msg_count;
3121 
3122 	if (!capable(CAP_SYS_ADMIN))
3123 		return -EPERM;
3124 
3125 	if (msg_count < 5) {
3126 		msg_count++;
3127 		printk(KERN_INFO
3128 			"warning: process `%s' used the obsolete bdflush"
3129 			" system call\n", current->comm);
3130 		printk(KERN_INFO "Fix your initscripts?\n");
3131 	}
3132 
3133 	if (func == 1)
3134 		do_exit(0);
3135 	return 0;
3136 }
3137 
3138 /*
3139  * Buffer-head allocation
3140  */
3141 static struct kmem_cache *bh_cachep;
3142 
3143 /*
3144  * Once the number of bh's in the machine exceeds this level, we start
3145  * stripping them in writeback.
3146  */
3147 static int max_buffer_heads;
3148 
3149 int buffer_heads_over_limit;
3150 
3151 struct bh_accounting {
3152 	int nr;			/* Number of live bh's */
3153 	int ratelimit;		/* Limit cacheline bouncing */
3154 };
3155 
3156 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3157 
3158 static void recalc_bh_state(void)
3159 {
3160 	int i;
3161 	int tot = 0;
3162 
3163 	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3164 		return;
3165 	__this_cpu_write(bh_accounting.ratelimit, 0);
3166 	for_each_online_cpu(i)
3167 		tot += per_cpu(bh_accounting, i).nr;
3168 	buffer_heads_over_limit = (tot > max_buffer_heads);
3169 }
3170 
3171 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3172 {
3173 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3174 	if (ret) {
3175 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3176 		preempt_disable();
3177 		__this_cpu_inc(bh_accounting.nr);
3178 		recalc_bh_state();
3179 		preempt_enable();
3180 	}
3181 	return ret;
3182 }
3183 EXPORT_SYMBOL(alloc_buffer_head);
3184 
3185 void free_buffer_head(struct buffer_head *bh)
3186 {
3187 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3188 	kmem_cache_free(bh_cachep, bh);
3189 	preempt_disable();
3190 	__this_cpu_dec(bh_accounting.nr);
3191 	recalc_bh_state();
3192 	preempt_enable();
3193 }
3194 EXPORT_SYMBOL(free_buffer_head);
3195 
3196 static void buffer_exit_cpu(int cpu)
3197 {
3198 	int i;
3199 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3200 
3201 	for (i = 0; i < BH_LRU_SIZE; i++) {
3202 		brelse(b->bhs[i]);
3203 		b->bhs[i] = NULL;
3204 	}
3205 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3206 	per_cpu(bh_accounting, cpu).nr = 0;
3207 }
3208 
3209 static int buffer_cpu_notify(struct notifier_block *self,
3210 			      unsigned long action, void *hcpu)
3211 {
3212 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3213 		buffer_exit_cpu((unsigned long)hcpu);
3214 	return NOTIFY_OK;
3215 }
3216 
3217 /**
3218  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3219  * @bh: struct buffer_head
3220  *
3221  * Return true if the buffer is up-to-date; otherwise return false
3222  * with the buffer locked.
3223  */
3224 int bh_uptodate_or_lock(struct buffer_head *bh)
3225 {
3226 	if (!buffer_uptodate(bh)) {
3227 		lock_buffer(bh);
3228 		if (!buffer_uptodate(bh))
3229 			return 0;
3230 		unlock_buffer(bh);
3231 	}
3232 	return 1;
3233 }
3234 EXPORT_SYMBOL(bh_uptodate_or_lock);
3235 
3236 /**
3237  * bh_submit_read - Submit a locked buffer for reading
3238  * @bh: struct buffer_head
3239  *
3240  * Returns zero on success and -EIO on error.
3241  */
3242 int bh_submit_read(struct buffer_head *bh)
3243 {
3244 	BUG_ON(!buffer_locked(bh));
3245 
3246 	if (buffer_uptodate(bh)) {
3247 		unlock_buffer(bh);
3248 		return 0;
3249 	}
3250 
3251 	get_bh(bh);
3252 	bh->b_end_io = end_buffer_read_sync;
3253 	submit_bh(READ, bh);
3254 	wait_on_buffer(bh);
3255 	if (buffer_uptodate(bh))
3256 		return 0;
3257 	return -EIO;
3258 }
3259 EXPORT_SYMBOL(bh_submit_read);
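/*
 * Illustrative sketch (not part of this file): the intended pairing of
 * bh_uptodate_or_lock() and bh_submit_read() when reading a metadata
 * buffer that may already be cached.  "bh" is assumed to be mapped.
 */
#if 0
static int myfs_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;	/* already uptodate, nothing to do */
	/* bh is now locked and not uptodate: issue the read and wait */
	return bh_submit_read(bh);
}
#endif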
3260 
3261 void __init buffer_init(void)
3262 {
3263 	int nrpages;
3264 
3265 	bh_cachep = kmem_cache_create("buffer_head",
3266 			sizeof(struct buffer_head), 0,
3267 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3268 				SLAB_MEM_SPREAD),
3269 				NULL);
3270 
3271 	/*
3272 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3273 	 */
3274 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3275 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3276 	hotcpu_notifier(buffer_cpu_notify, 0);
3277 }
3278