// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include <linux/stat.h>
#include "../fs/internal.h"
#include "blk.h"

/* Should we allow writing to mounted block devices? */
static bool bdev_allow_write_mounted = IS_ENABLED(CONFIG_BLK_DEV_WRITE_MOUNTED);

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

static inline struct inode *BD_INODE(struct block_device *bdev)
{
	return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode;
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

struct block_device *file_bdev(struct file *bdev_file)
{
	return I_BDEV(bdev_file->f_mapping->host);
}
EXPORT_SYMBOL(file_bdev);

static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = BD_INODE(bdev);
	int ret;

	spin_lock(&inode->i_lock);
	while (inode_state_read(inode) & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret)
			pr_warn_ratelimited(
				"VFS: Dirty inode writeback failed for block device %pg (err=%d).\n",
				bdev, ret);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_mapping;

	if (mapping_empty(mapping))
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
}
EXPORT_SYMBOL(invalidate_bdev);
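
/*
 * Illustrative sketch (not compiled here): a caller that rescanned its
 * on-disk state and wants stale clean pagecache dropped would pair the two
 * helpers above like this:
 *
 *	sync_blockdev(bdev);	// push out dirty pages first
 *	invalidate_bdev(bdev);	// then drop clean, unused pages
 *
 * Unlike kill_bdev(), invalidate_bdev() never discards dirty data, so it is
 * safe on a device that may still have writers.
 */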

/*
 * Drop all buffers & page cache for given bdev range. This function bails
 * out with an error if bdev has another exclusive owner (such as a
 * filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
			loff_t lstart, loff_t lend)
{
	/*
	 * If we don't hold exclusive handle for the device, upgrade to it
	 * while we discard the buffer cache to avoid discarding buffers
	 * under live filesystem.
	 */
	if (!(mode & BLK_OPEN_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL);
		if (err)
			goto invalidate;
	}

	truncate_inode_pages_range(bdev->bd_mapping, lstart, lend);
	if (!(mode & BLK_OPEN_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;

invalidate:
	/*
	 * Someone else holds an exclusive handle on the device. Try
	 * invalidating instead. The 'end' argument is inclusive so the
	 * rounding is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}
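
/*
 * Illustrative sketch (assumed caller, not part of this file): discard-style
 * handlers flush and drop the affected pagecache range before issuing the
 * device command, e.g.:
 *
 *	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
 *	if (err)
 *		return err;
 *	err = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
 *				   len >> SECTOR_SHIFT, GFP_KERNEL);
 *
 * Note the inclusive 'lend' argument: the range ends at the last byte.
 */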

static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(BD_INODE(bdev));

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	BD_INODE(bdev)->i_blkbits = blksize_bits(bsize);
	mapping_set_folio_min_order(BD_INODE(bdev)->i_mapping,
				    get_order(bsize));
}

/**
 * bdev_validate_blocksize - check that this block size is acceptable
 * @bdev: blockdevice to check
 * @block_size: block size to check
 *
 * For block device users that do not use buffer heads or the block device
 * page cache, make sure that this block size can be used with the device.
 *
 * Return: On success zero is returned, negative error code on failure.
 */
int bdev_validate_blocksize(struct block_device *bdev, int block_size)
{
	if (blk_validate_block_size(block_size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (block_size < bdev_logical_block_size(bdev))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(bdev_validate_blocksize);
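
/*
 * Illustrative sketch (hypothetical caller): a driver stacking on top of a
 * block device would validate a requested block size up front:
 *
 *	if (bdev_validate_blocksize(bdev, 4096))
 *		return -EINVAL;	// not a valid power-of-two size, or smaller
 *				// than the device's logical block size
 */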

int set_blocksize(struct file *file, int size)
{
	struct inode *inode = file->f_mapping->host;
	struct block_device *bdev = I_BDEV(inode);
	int ret;

	ret = bdev_validate_blocksize(bdev, size);
	if (ret)
		return ret;

	if (!file->private_data)
		return -EINVAL;

	/* Don't change the size if it is the same as the current one */
	if (inode->i_blkbits != blksize_bits(size)) {
		/*
		 * Flush and truncate the pagecache before we reconfigure the
		 * mapping geometry because folio sizes are variable now. If a
		 * reader has already allocated a folio whose size is smaller
		 * than the new min_order but invokes readahead after the new
		 * min_order becomes visible, readahead will think there are
		 * "zero" blocks per folio and crash. Take the inode and
		 * invalidation locks to avoid racing with
		 * read/write/fallocate.
		 */
		inode_lock(inode);
		filemap_invalidate_lock(inode->i_mapping);

		sync_blockdev(bdev);
		kill_bdev(bdev);

		inode->i_blkbits = blksize_bits(size);
		mapping_set_folio_min_order(inode->i_mapping, get_order(size));
		filemap_invalidate_unlock(inode->i_mapping);
		inode_unlock(inode);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);
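
/*
 * Illustrative sketch (assumed caller): a filesystem holding an exclusively
 * opened bdev file can switch the soft block size before reading metadata:
 *
 *	if (set_blocksize(sb->s_bdev_file, 4096))
 *		return -EINVAL;	// rejected, or the file holds no claim
 *
 * The !file->private_data check above means this only succeeds on files
 * opened with a holder, i.e. exclusive openers.
 */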

static int sb_validate_large_blocksize(struct super_block *sb, int size)
{
	const char *err_str = NULL;

	if (!(sb->s_type->fs_flags & FS_LBS))
		err_str = "not supported by filesystem";
	else if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		err_str = "is only supported with CONFIG_TRANSPARENT_HUGEPAGE";

	if (!err_str)
		return 0;

	pr_warn_ratelimited("%s: block size(%d) > page size(%lu) %s\n",
			    sb->s_type->name, size, PAGE_SIZE, err_str);
	return -EINVAL;
}

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (size > PAGE_SIZE && sb_validate_large_blocksize(sb, size))
		return 0;
	if (set_blocksize(sb->s_bdev_file, size))
		return 0;
	/* If we get here, we know size is validated */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int __must_check sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
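
/*
 * Illustrative sketch (hypothetical fill_super): filesystems typically start
 * from their preferred superblock read size but never go below the device's
 * logical block size. Both helpers return the resulting block size, or 0 on
 * failure:
 *
 *	blocksize = sb_min_blocksize(sb, 1024);
 *	if (!blocksize)
 *		return -EINVAL;	// device needs a larger block size
 */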

int sync_blockdev_nowait(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_flush(bdev->bd_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping. Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_write_and_wait(bdev->bd_mapping);
}
EXPORT_SYMBOL(sync_blockdev);

int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
	return filemap_write_and_wait_range(bdev->bd_mapping,
					    lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);
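
/*
 * Illustrative sketch (hypothetical caller): when only part of the device
 * needs to be durable, e.g. a journal region, a ranged sync avoids writing
 * back the whole mapping:
 *
 *	err = sync_blockdev_range(bdev, journal_start,
 *				  journal_start + journal_len - 1);
 *
 * As with truncate_bdev_range(), 'lend' is an inclusive byte offset.
 */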

/**
 * bdev_freeze - lock a filesystem and force it into a consistent state
 * @bdev: blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously. It is incremented in bdev_freeze()
 * and decremented in bdev_thaw(); when it reaches 0, bdev_thaw() actually
 * unfreezes.
 *
 * Return: On success zero is returned, negative error code on failure.
 */
int bdev_freeze(struct block_device *bdev)
{
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);

	if (atomic_inc_return(&bdev->bd_fsfreeze_count) > 1) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return 0;
	}

	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->freeze) {
		error = bdev->bd_holder_ops->freeze(bdev);
		lockdep_assert_not_held(&bdev->bd_holder_lock);
	} else {
		mutex_unlock(&bdev->bd_holder_lock);
		error = sync_blockdev(bdev);
	}

	if (error)
		atomic_dec(&bdev->bd_fsfreeze_count);

	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(bdev_freeze);

/**
 * bdev_thaw - unlock filesystem
 * @bdev: blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after bdev_freeze().
 *
 * Return: On success zero is returned, negative error code on failure.
 */
int bdev_thaw(struct block_device *bdev)
{
	int error = -EINVAL, nr_freeze;

	mutex_lock(&bdev->bd_fsfreeze_mutex);

	/*
	 * If this returns < 0 it means that @bd_fsfreeze_count was
	 * already 0 and no decrement was performed.
	 */
	nr_freeze = atomic_dec_if_positive(&bdev->bd_fsfreeze_count);
	if (nr_freeze < 0)
		goto out;

	error = 0;
	if (nr_freeze > 0)
		goto out;

	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->thaw) {
		error = bdev->bd_holder_ops->thaw(bdev);
		lockdep_assert_not_held(&bdev->bd_holder_lock);
	} else {
		mutex_unlock(&bdev->bd_holder_lock);
	}

	if (error)
		atomic_inc(&bdev->bd_fsfreeze_count);
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(bdev_thaw);
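
/*
 * Illustrative sketch (hypothetical snapshot driver): freeze/thaw calls nest
 * via bd_fsfreeze_count, so a simple bracket around the snapshot is enough
 * even if other freezers are active:
 *
 *	err = bdev_freeze(bdev);
 *	if (err)
 *		return err;
 *	take_snapshot(bdev);	// hypothetical helper
 *	err = bdev_thaw(bdev);	// -EINVAL if the device wasn't frozen
 */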

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_MUTEX(bdev_lock);
static struct kmem_cache *bdev_cachep __ro_after_init;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = alloc_inode_sb(sb, bdev_cachep, GFP_KERNEL);

	if (!ei)
		return NULL;
	memset(&ei->bdev, 0, sizeof(ei->bdev));

	if (security_bdev_alloc(&ei->bdev)) {
		kmem_cache_free(bdev_cachep, ei);
		return NULL;
	}
	return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
	struct block_device *bdev = I_BDEV(inode);

	free_percpu(bdev->bd_stats);
	kfree(bdev->bd_meta_info);
	security_bdev_free(bdev);

	if (!bdev_is_partition(bdev)) {
		if (bdev->bd_disk && bdev->bd_disk->bdi)
			bdi_put(bdev->bd_disk->bdi);
		kfree(bdev->bd_disk);
	}

	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		blk_free_ext_minor(MINOR(bdev->bd_dev));

	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *data)
{
	struct bdev_inode *ei = data;

	inode_init_once(&ei->vfs_inode);
}

static void bdev_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = inode_just_drop,
	.evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb	= kill_anon_super,
};

struct super_block *blockdev_superblock __ro_after_init;
static struct vfsmount *blockdev_mnt __ro_after_init;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
	int err;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	blockdev_mnt = kern_mount(&bd_type);
	if (IS_ERR(blockdev_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = blockdev_mnt->mnt_sb;	/* For writeback */
}

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = new_inode(blockdev_superblock);
	if (!inode)
		return NULL;
	inode->i_mode = S_IFBLK;
	inode->i_rdev = 0;
	inode->i_data.a_ops = &def_blk_aops;
	mapping_set_gfp_mask(&inode->i_data, GFP_USER);

	bdev = I_BDEV(inode);
	mutex_init(&bdev->bd_fsfreeze_mutex);
	spin_lock_init(&bdev->bd_size_lock);
	mutex_init(&bdev->bd_holder_lock);
	atomic_set(&bdev->__bd_flags, partno);
	bdev->bd_mapping = &inode->i_data;
	bdev->bd_queue = disk->queue;
	if (partno && bdev_test_flag(disk->part0, BD_HAS_SUBMIT_BIO))
		bdev_set_flag(bdev, BD_HAS_SUBMIT_BIO);
	bdev->bd_stats = alloc_percpu(struct disk_stats);
	if (!bdev->bd_stats) {
		iput(inode);
		return NULL;
	}
	bdev->bd_disk = disk;
	return bdev;
}

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
	spin_lock(&bdev->bd_size_lock);
	i_size_write(BD_INODE(bdev), (loff_t)sectors << SECTOR_SHIFT);
	bdev->bd_nr_sectors = sectors;
	spin_unlock(&bdev->bd_size_lock);
}

void bdev_add(struct block_device *bdev, dev_t dev)
{
	struct inode *inode = BD_INODE(bdev);
	if (bdev_stable_writes(bdev))
		mapping_set_stable_writes(bdev->bd_mapping);
	bdev->bd_dev = dev;
	inode->i_rdev = dev;
	inode->i_ino = dev;
	insert_inode_hash(inode);
}

void bdev_unhash(struct block_device *bdev)
{
	remove_inode_hash(BD_INODE(bdev));
}

void bdev_drop(struct block_device *bdev)
{
	iput(BD_INODE(bdev));
}

long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	lockdep_assert_held(&bdev_lock);

	if (bdev->bd_holder) {
		/*
		 * The same holder can always re-claim.
		 */
		if (bdev->bd_holder == holder) {
			if (WARN_ON_ONCE(bdev->bd_holder_ops != hops))
				return false;
			return true;
		}
		return false;
	}

	/*
	 * If the whole device's holder is set to bd_may_claim, a partition on
	 * the device is claimed, but not the whole device.
	 */
	if (whole != bdev &&
	    whole->bd_holder && whole->bd_holder != bd_may_claim)
		return false;
	return true;
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops.
 *
 * Claim @bdev. This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress. On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	if (WARN_ON_ONCE(!holder))
		return -EINVAL;
retry:
	mutex_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, holder, hops)) {
		mutex_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = __var_waitqueue(&whole->bd_claiming);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	mutex_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_var(&whole->bd_claiming);
}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 * @hops: block device holder operations
 *
 * Finish exclusive open of a block device. Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	mutex_lock(&bdev_lock);
	BUG_ON(!bd_may_claim(bdev, holder, hops));
	/*
	 * Note that for a whole device bd_holders will be incremented twice,
	 * and bd_holder will be set to bd_may_claim before being set to holder
	 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	mutex_lock(&bdev->bd_holder_lock);
	bdev->bd_holder = holder;
	bdev->bd_holder_ops = hops;
	mutex_unlock(&bdev->bd_holder_lock);
	bd_clear_claiming(whole, holder);
	mutex_unlock(&bdev_lock);
}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can be
 * also used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
	mutex_lock(&bdev_lock);
	bd_clear_claiming(bdev_whole(bdev), holder);
	mutex_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);
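
/*
 * Illustrative sketch (see truncate_bdev_range() above for an in-tree use):
 * a claim can be taken temporarily just to fence off other exclusive
 * openers, using any unique cookie as the holder:
 *
 *	if (!bd_prepare_to_claim(bdev, my_cookie, NULL)) {
 *		do_work_without_exclusive_openers(bdev);	// hypothetical
 *		bd_abort_claiming(bdev, my_cookie);
 *	}
 */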

static void bd_end_claim(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);
	bool unblock = false;

	/*
	 * Release a claim on the device. The holder fields are protected with
	 * bdev_lock. open_mutex is used to synchronize disk_holder unlinking.
	 */
	mutex_lock(&bdev_lock);
	WARN_ON_ONCE(bdev->bd_holder != holder);
	WARN_ON_ONCE(--bdev->bd_holders < 0);
	WARN_ON_ONCE(--whole->bd_holders < 0);
	if (!bdev->bd_holders) {
		mutex_lock(&bdev->bd_holder_lock);
		bdev->bd_holder = NULL;
		bdev->bd_holder_ops = NULL;
		mutex_unlock(&bdev->bd_holder_lock);
		if (bdev_test_flag(bdev, BD_WRITE_HOLDER))
			unblock = true;
	}
	if (!whole->bd_holders)
		whole->bd_holder = NULL;
	mutex_unlock(&bdev_lock);

	/*
	 * If this was the last claim, remove holder link and unblock event
	 * polling if it was a write holder.
	 */
	if (unblock) {
		disk_unblock_events(bdev->bd_disk);
		bdev_clear_flag(bdev, BD_WRITE_HOLDER);
	}
}

static void blkdev_flush_mapping(struct block_device *bdev)
{
	WARN_ON_ONCE(bdev->bd_holders);
	sync_blockdev(bdev);
	kill_bdev(bdev);
	bdev_write_inode(bdev);
}

static void blkdev_put_whole(struct block_device *bdev)
{
	if (atomic_dec_and_test(&bdev->bd_openers))
		blkdev_flush_mapping(bdev);
	if (bdev->bd_disk->fops->release)
		bdev->bd_disk->fops->release(bdev->bd_disk);
}

static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret;

	if (disk->fops->open) {
		ret = disk->fops->open(disk, mode);
		if (ret) {
			/* avoid ghost partitions on a removed medium */
			if (ret == -ENOMEDIUM &&
			    test_bit(GD_NEED_PART_SCAN, &disk->state))
				bdev_disk_changed(disk, true);
			return ret;
		}
	}

	if (!atomic_read(&bdev->bd_openers))
		set_init_blocksize(bdev);
	atomic_inc(&bdev->bd_openers);
	if (test_bit(GD_NEED_PART_SCAN, &disk->state)) {
		/*
		 * Only return scanning errors if we are called from contexts
		 * that explicitly want them, e.g. the BLKRRPART ioctl.
		 */
		ret = bdev_disk_changed(disk, false);
		if (ret && (mode & BLK_OPEN_STRICT_SCAN)) {
			blkdev_put_whole(bdev);
			return ret;
		}
	}
	return 0;
}

static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
{
	struct gendisk *disk = part->bd_disk;
	int ret;

	ret = blkdev_get_whole(bdev_whole(part), mode);
	if (ret)
		return ret;

	ret = -ENXIO;
	if (!bdev_nr_sectors(part))
		goto out_blkdev_put;

	if (!atomic_read(&part->bd_openers)) {
		disk->open_partitions++;
		set_init_blocksize(part);
	}
	atomic_inc(&part->bd_openers);
	return 0;

out_blkdev_put:
	blkdev_put_whole(bdev_whole(part));
	return ret;
}

int bdev_permission(dev_t dev, blk_mode_t mode, void *holder)
{
	int ret;

	ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
			MAJOR(dev), MINOR(dev),
			((mode & BLK_OPEN_READ) ? DEVCG_ACC_READ : 0) |
			((mode & BLK_OPEN_WRITE) ? DEVCG_ACC_WRITE : 0));
	if (ret)
		return ret;

	/* Blocking writes requires an exclusive opener */
	if (mode & BLK_OPEN_RESTRICT_WRITES && !holder)
		return -EINVAL;

	/*
	 * We're using error pointers to indicate to ->release() when we
	 * failed to open that block device, so an error pointer passed in
	 * as @holder makes no sense anyway.
	 */
	if (WARN_ON_ONCE(IS_ERR(holder)))
		return -EINVAL;

	return 0;
}

static void blkdev_put_part(struct block_device *part)
{
	struct block_device *whole = bdev_whole(part);

	if (atomic_dec_and_test(&part->bd_openers)) {
		blkdev_flush_mapping(part);
		whole->bd_disk->open_partitions--;
	}
	blkdev_put_whole(whole);
}

struct block_device *blkdev_get_no_open(dev_t dev, bool autoload)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = ilookup(blockdev_superblock, dev);
	if (!inode && autoload && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
		blk_request_module(dev);
		inode = ilookup(blockdev_superblock, dev);
		if (inode)
			pr_warn_ratelimited(
				"block device autoloading is deprecated and will be removed.\n");
	}
	if (!inode)
		return NULL;

	/* switch from the inode reference to a device model one: */
	bdev = &BDEV_I(inode)->bdev;
	if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
		bdev = NULL;
	iput(inode);
	return bdev;
}

void blkdev_put_no_open(struct block_device *bdev)
{
	put_device(&bdev->bd_device);
}

static bool bdev_writes_blocked(struct block_device *bdev)
{
	return bdev->bd_writers < 0;
}

static void bdev_block_writes(struct block_device *bdev)
{
	bdev->bd_writers--;
}

static void bdev_unblock_writes(struct block_device *bdev)
{
	bdev->bd_writers++;
}

static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode)
{
	if (bdev_allow_write_mounted)
		return true;
	/* Writes blocked? */
	if (mode & BLK_OPEN_WRITE && bdev_writes_blocked(bdev))
		return false;
	if (mode & BLK_OPEN_RESTRICT_WRITES && bdev->bd_writers > 0)
		return false;
	return true;
}

static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode)
{
	if (bdev_allow_write_mounted)
		return;

	/* Claim exclusive or shared write access. */
	if (mode & BLK_OPEN_RESTRICT_WRITES)
		bdev_block_writes(bdev);
	else if (mode & BLK_OPEN_WRITE)
		bdev->bd_writers++;
}

static inline bool bdev_unclaimed(const struct file *bdev_file)
{
	return bdev_file->private_data == BDEV_I(bdev_file->f_mapping->host);
}

static void bdev_yield_write_access(struct file *bdev_file)
{
	struct block_device *bdev;

	if (bdev_allow_write_mounted)
		return;

	if (bdev_unclaimed(bdev_file))
		return;

	bdev = file_bdev(bdev_file);

	if (bdev_file->f_mode & FMODE_WRITE_RESTRICTED)
		bdev_unblock_writes(bdev);
	else if (bdev_file->f_mode & FMODE_WRITE)
		bdev->bd_writers--;
}

/**
 * bdev_open - open a block device
 * @bdev: block device to open
 * @mode: open mode (BLK_OPEN_*)
 * @holder: exclusive holder identifier
 * @hops: holder operations
 * @bdev_file: file for the block device
 *
 * Open the block device. If @holder is not %NULL, the block device is opened
 * with exclusive access. Exclusive opens may nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * zero on success, -errno on failure.
 */
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file)
{
	bool unblock_events = true;
	struct gendisk *disk = bdev->bd_disk;
	int ret;

	if (holder) {
		mode |= BLK_OPEN_EXCL;
		ret = bd_prepare_to_claim(bdev, holder, hops);
		if (ret)
			return ret;
	} else {
		if (WARN_ON_ONCE(mode & BLK_OPEN_EXCL))
			return -EIO;
	}

	disk_block_events(disk);

	mutex_lock(&disk->open_mutex);
	ret = -ENXIO;
	if (!disk_live(disk))
		goto abort_claiming;
	if (!try_module_get(disk->fops->owner))
		goto abort_claiming;
	ret = -EBUSY;
	if (!bdev_may_open(bdev, mode))
		goto put_module;
	if (bdev_is_partition(bdev))
		ret = blkdev_get_part(bdev, mode);
	else
		ret = blkdev_get_whole(bdev, mode);
	if (ret)
		goto put_module;
	bdev_claim_write_access(bdev, mode);
	if (holder) {
		bd_finish_claiming(bdev, holder, hops);

		/*
		 * Block event polling for write claims if requested. Any write
		 * holder makes the write_holder state stick until all are
		 * released. This is good enough and tracking individual
		 * writeable references is too fragile given the way @mode is
		 * used in blkdev_get/put().
		 */
		if ((mode & BLK_OPEN_WRITE) &&
		    !bdev_test_flag(bdev, BD_WRITE_HOLDER) &&
		    (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
			bdev_set_flag(bdev, BD_WRITE_HOLDER);
			unblock_events = false;
		}
	}
	mutex_unlock(&disk->open_mutex);

	if (unblock_events)
		disk_unblock_events(disk);

	bdev_file->f_flags |= O_LARGEFILE;
	bdev_file->f_mode |= FMODE_CAN_ODIRECT;
	if (bdev_nowait(bdev))
		bdev_file->f_mode |= FMODE_NOWAIT;
	if (mode & BLK_OPEN_RESTRICT_WRITES)
		bdev_file->f_mode |= FMODE_WRITE_RESTRICTED;
	bdev_file->f_mapping = bdev->bd_mapping;
	bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping);
	bdev_file->private_data = holder;

	return 0;
put_module:
	module_put(disk->fops->owner);
abort_claiming:
	if (holder)
		bd_abort_claiming(bdev, holder);
	mutex_unlock(&disk->open_mutex);
	disk_unblock_events(disk);
	return ret;
}

/*
 * If BLK_OPEN_WRITE_IOCTL is set then this is a historical quirk
 * associated with the floppy driver, which allowed ioctls if the
 * file was opened for writing, but did not allow reads or writes.
 * Make sure that this quirk is reflected in @f_flags.
 *
 * It can also happen if a block device is opened as O_RDWR | O_WRONLY.
 */
static unsigned blk_to_file_flags(blk_mode_t mode)
{
	unsigned int flags = 0;

	if ((mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) ==
	    (BLK_OPEN_READ | BLK_OPEN_WRITE))
		flags |= O_RDWR;
	else if (mode & BLK_OPEN_WRITE_IOCTL)
		flags |= O_RDWR | O_WRONLY;
	else if (mode & BLK_OPEN_WRITE)
		flags |= O_WRONLY;
	else if (mode & BLK_OPEN_READ)
		flags |= O_RDONLY; /* homeopathic, because O_RDONLY is 0 */
	else
		WARN_ON_ONCE(true);

	if (mode & BLK_OPEN_NDELAY)
		flags |= O_NDELAY;

	return flags;
}

struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
				   const struct blk_holder_ops *hops)
{
	struct file *bdev_file;
	struct block_device *bdev;
	unsigned int flags;
	int ret;

	ret = bdev_permission(dev, mode, holder);
	if (ret)
		return ERR_PTR(ret);

	bdev = blkdev_get_no_open(dev, true);
	if (!bdev)
		return ERR_PTR(-ENXIO);

	flags = blk_to_file_flags(mode);
	bdev_file = alloc_file_pseudo_noaccount(BD_INODE(bdev),
			blockdev_mnt, "", flags | O_LARGEFILE, &def_blk_fops);
	if (IS_ERR(bdev_file)) {
		blkdev_put_no_open(bdev);
		return bdev_file;
	}
	ihold(BD_INODE(bdev));

	ret = bdev_open(bdev, mode, holder, hops, bdev_file);
	if (ret) {
		/* We failed to open the block device. Let ->release() know. */
		bdev_file->private_data = ERR_PTR(ret);
		fput(bdev_file);
		return ERR_PTR(ret);
	}
	return bdev_file;
}
EXPORT_SYMBOL(bdev_file_open_by_dev);
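
/*
 * Illustrative sketch (hypothetical caller): opening by dev_t with an
 * exclusive holder; any unique pointer works as the holder cookie, and
 * filesystems would pass &fs_holder_ops instead of NULL:
 *
 *	file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
 *				     my_holder, NULL);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */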

struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
				    void *holder,
				    const struct blk_holder_ops *hops)
{
	struct file *file;
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	file = bdev_file_open_by_dev(dev, mode, holder, hops);
	if (!IS_ERR(file) && (mode & BLK_OPEN_WRITE)) {
		if (bdev_read_only(file_bdev(file))) {
			fput(file);
			file = ERR_PTR(-EACCES);
		}
	}

	return file;
}
EXPORT_SYMBOL(bdev_file_open_by_path);
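
/*
 * Illustrative sketch (hypothetical caller): the usual open/use/close cycle
 * for a path-based exclusive open pairs with bdev_fput():
 *
 *	file = bdev_file_open_by_path("/dev/vdb", BLK_OPEN_READ, my_holder,
 *				      NULL);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	bdev = file_bdev(file);
 *	// ... submit I/O against bdev ...
 *	bdev_fput(file);	// yields the claim before the deferred fput
 */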

static inline void bd_yield_claim(struct file *bdev_file)
{
	struct block_device *bdev = file_bdev(bdev_file);
	void *holder = bdev_file->private_data;

	lockdep_assert_held(&bdev->bd_disk->open_mutex);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(holder)))
		return;

	if (!bdev_unclaimed(bdev_file))
		bd_end_claim(bdev, holder);
}

void bdev_release(struct file *bdev_file)
{
	struct block_device *bdev = file_bdev(bdev_file);
	void *holder = bdev_file->private_data;
	struct gendisk *disk = bdev->bd_disk;

	/* We failed to open that block device. */
	if (IS_ERR(holder))
		goto put_no_open;

	/*
	 * Sync early if it looks like we're the last one. If someone else
	 * opens the block device between now and the decrement of bd_openers
	 * then we did a sync that we didn't need to, but that's not the end
	 * of the world and we want to avoid long (possibly several minutes)
	 * syncs while holding the mutex.
	 */
	if (atomic_read(&bdev->bd_openers) == 1)
		sync_blockdev(bdev);

	mutex_lock(&disk->open_mutex);
	bdev_yield_write_access(bdev_file);

	if (holder)
		bd_yield_claim(bdev_file);

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event. This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);

	if (bdev_is_partition(bdev))
		blkdev_put_part(bdev);
	else
		blkdev_put_whole(bdev);
	mutex_unlock(&disk->open_mutex);

	module_put(disk->fops->owner);
put_no_open:
	blkdev_put_no_open(bdev);
}

/**
 * bdev_fput - yield claim to the block device and put the file
 * @bdev_file: open block device
 *
 * Yield claim on the block device and put the file. Ensure that the
 * block device can be reclaimed before the file is closed, which is a
 * deferred operation.
 */
void bdev_fput(struct file *bdev_file)
{
	if (WARN_ON_ONCE(bdev_file->f_op != &def_blk_fops))
		return;

	if (bdev_file->private_data) {
		struct block_device *bdev = file_bdev(bdev_file);
		struct gendisk *disk = bdev->bd_disk;

		mutex_lock(&disk->open_mutex);
		bdev_yield_write_access(bdev_file);
		bd_yield_claim(bdev_file);
		/*
		 * Tell release that we already gave up our hold on the
		 * device and, if write restrictions apply, that we already
		 * gave up write access to the device.
		 */
		bdev_file->private_data = BDEV_I(bdev_file->f_mapping->host);
		mutex_unlock(&disk->open_mutex);
	}

	fput(bdev_file);
}
EXPORT_SYMBOL(bdev_fput);

/**
 * lookup_bdev() - Look up a struct block_device by name.
 * @pathname: Name of the block device in the filesystem.
 * @dev: Pointer to the block device's dev_t, if found.
 *
 * Lookup the block device's dev_t at @pathname in the current
 * namespace if possible and return it in @dev.
 *
 * Context: May sleep.
 * Return: 0 if succeeded, negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return -EINVAL;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return error;

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto out_path_put;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto out_path_put;

	*dev = inode->i_rdev;
	error = 0;
out_path_put:
	path_put(&path);
	return error;
}
EXPORT_SYMBOL(lookup_bdev);
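
/*
 * Illustrative sketch (hypothetical caller): resolving a device path to a
 * dev_t without opening it, e.g. to compare against an already open device:
 *
 *	dev_t dev;
 *
 *	if (lookup_bdev("/dev/mapper/data", &dev) == 0 &&
 *	    dev == sb->s_bdev->bd_dev)
 *		;	// same underlying device
 */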

/**
 * bdev_mark_dead - mark a block device as dead
 * @bdev: block device to operate on
 * @surprise: indicate a surprise removal
 *
 * Tell the file system that this device or its media is dead. If @surprise is
 * set to %true the device or media is already gone, if not we are preparing
 * for an orderly removal.
 *
 * This calls into the file system, which then typically syncs out all dirty
 * data and writes back inodes and then invalidates any cached data in the
 * inodes on the file system. In addition we also invalidate the block device
 * mapping.
 */
void bdev_mark_dead(struct block_device *bdev, bool surprise)
{
	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
		bdev->bd_holder_ops->mark_dead(bdev, surprise);
	else {
		mutex_unlock(&bdev->bd_holder_lock);
		sync_blockdev(bdev);
	}

	invalidate_bdev(bdev);
}
/*
 * New drivers should not use this directly. There are, however, some drivers
 * that need this for historical reasons. For example, the DASD driver has
 * historically had a shutdown to offline mode that doesn't actually remove the
 * gendisk that otherwise looks a lot like a safe device removal.
 */
EXPORT_SYMBOL_GPL(bdev_mark_dead);

void sync_bdevs(bool wait)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		struct block_device *bdev;

		spin_lock(&inode->i_lock);
		if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from the s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
		bdev = I_BDEV(inode);

		mutex_lock(&bdev->bd_disk->open_mutex);
		if (!atomic_read(&bdev->bd_openers)) {
			; /* skip */
		} else if (wait) {
			/*
			 * We keep the error status of individual mappings so
			 * that applications can catch the writeback error
			 * using fsync(2). See filemap_fdatawait_keep_errors()
			 * for details.
			 */
			filemap_fdatawait_keep_errors(inode->i_mapping);
		} else {
			filemap_fdatawrite(inode->i_mapping);
		}
		mutex_unlock(&bdev->bd_disk->open_mutex);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}

/*
 * Handle STATX_{DIOALIGN, WRITE_ATOMIC} for block devices.
 */
void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask)
{
	struct block_device *bdev;

	/*
	 * Note that d_backing_inode() returns the block device node inode, not
	 * the block device's internal inode. Therefore it is *not* valid to
	 * use I_BDEV() here; the block device has to be looked up by i_rdev
	 * instead.
	 */
	bdev = blkdev_get_no_open(d_backing_inode(path->dentry)->i_rdev, false);
	if (!bdev)
		return;

	if (request_mask & STATX_DIOALIGN) {
		stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
		stat->dio_offset_align = bdev_logical_block_size(bdev);
		stat->result_mask |= STATX_DIOALIGN;
	}

	if (request_mask & STATX_WRITE_ATOMIC && bdev_can_atomic_write(bdev)) {
		struct request_queue *bd_queue = bdev->bd_queue;

		generic_fill_statx_atomic_writes(stat,
			queue_atomic_write_unit_min_bytes(bd_queue),
			queue_atomic_write_unit_max_bytes(bd_queue),
			0);
	}

	stat->blksize = bdev_io_min(bdev);

	blkdev_put_no_open(bdev);
}

bool disk_live(struct gendisk *disk)
{
	return !inode_unhashed(BD_INODE(disk->part0));
}
EXPORT_SYMBOL_GPL(disk_live);

unsigned int block_size(struct block_device *bdev)
{
	return 1 << BD_INODE(bdev)->i_blkbits;
}
EXPORT_SYMBOL_GPL(block_size);

static int __init setup_bdev_allow_write_mounted(char *str)
{
	if (kstrtobool(str, &bdev_allow_write_mounted))
		pr_warn("Invalid option string for bdev_allow_write_mounted: '%s'\n",
			str);
	return 1;
}
__setup("bdev_allow_write_mounted=", setup_bdev_allow_write_mounted);