Lines Matching +full:foo +full:-queue
1 // SPDX-License-Identifier: GPL-2.0-only
18 #include <linux/backing-dev.h>
53 return &BDEV_I(inode)->bdev; in I_BDEV()
59 struct inode *inode = bdev->bd_inode; in bdev_write_inode()
62 spin_lock(&inode->i_lock); in bdev_write_inode()
63 while (inode->i_state & I_DIRTY) { in bdev_write_inode()
64 spin_unlock(&inode->i_lock); in bdev_write_inode()
72 spin_lock(&inode->i_lock); in bdev_write_inode()
74 spin_unlock(&inode->i_lock); in bdev_write_inode()
80 struct address_space *mapping = bdev->bd_inode->i_mapping; in kill_bdev()
82 if (mapping->nrpages == 0 && mapping->nrexceptional == 0) in kill_bdev()
92 struct address_space *mapping = bdev->bd_inode->i_mapping; in invalidate_bdev()
94 if (mapping->nrpages) { in invalidate_bdev()
97 invalidate_mapping_pages(mapping, 0, -1); in invalidate_bdev()
122 claimed_bdev = bdev->bd_contains; in truncate_bdev_range()
128 truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend); in truncate_bdev_range()
137 bdev->bd_inode->i_blkbits = blksize_bits(bdev_logical_block_size(bdev)); in set_init_blocksize()
144 return -EINVAL; in set_blocksize()
148 return -EINVAL; in set_blocksize()
151 if (bdev->bd_inode->i_blkbits != blksize_bits(size)) { in set_blocksize()
153 bdev->bd_inode->i_blkbits = blksize_bits(size); in set_blocksize()
163 if (set_blocksize(sb->s_bdev, size)) in sb_set_blocksize()
167 sb->s_blocksize = size; in sb_set_blocksize()
168 sb->s_blocksize_bits = blksize_bits(size); in sb_set_blocksize()
169 return sb->s_blocksize; in sb_set_blocksize()
176 int minsize = bdev_logical_block_size(sb->s_bdev); in sb_min_blocksize()
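A hedged sketch of how a filesystem typically drives these two helpers at mount time (the myfs names and the 4096-byte size are illustrative assumptions, not from this file):

    static int myfs_fill_super(struct super_block *sb, void *data, int silent)
    {
            struct buffer_head *bh;

            /* start at the device's logical block size, but at least 512 */
            if (!sb_min_blocksize(sb, 512))
                    return -EINVAL;

            /* ... read the on-disk superblock, learn the real block size ... */

            /* switch to it; returns 0 if the device can't support it */
            if (!sb_set_blocksize(sb, 4096))
                    return -EINVAL;

            bh = sb_bread(sb, 0);
            if (!bh)
                    return -EIO;
            /* ... parse ... */
            brelse(bh);
            return 0;
    }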
188 bh->b_bdev = I_BDEV(inode); in blkdev_get_block()
189 bh->b_blocknr = iblock; in blkdev_get_block()
196 return file->f_mapping->host; in bdev_file_inode()
204 if (iocb->ki_flags & IOCB_DSYNC) in dio_bio_write_op()
213 struct task_struct *waiter = bio->bi_private; in blkdev_bio_end_io_simple()
215 WRITE_ONCE(bio->bi_private, NULL); in blkdev_bio_end_io_simple()
223 struct file *file = iocb->ki_filp; in __blkdev_direct_IO_simple()
226 loff_t pos = iocb->ki_pos; in __blkdev_direct_IO_simple()
233 (bdev_logical_block_size(bdev) - 1)) in __blkdev_direct_IO_simple()
234 return -EINVAL; in __blkdev_direct_IO_simple()
242 return -ENOMEM; in __blkdev_direct_IO_simple()
248 bio.bi_write_hint = iocb->ki_hint; in __blkdev_direct_IO_simple()
251 bio.bi_ioprio = iocb->ki_ioprio; in __blkdev_direct_IO_simple()
266 if (iocb->ki_flags & IOCB_HIPRI) in __blkdev_direct_IO_simple()
274 if (!(iocb->ki_flags & IOCB_HIPRI) || in __blkdev_direct_IO_simple()
310 struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host); in blkdev_iopoll()
313 return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait); in blkdev_iopoll()
318 struct blkdev_dio *dio = bio->bi_private; in blkdev_bio_end_io()
319 bool should_dirty = dio->should_dirty; in blkdev_bio_end_io()
321 if (bio->bi_status && !dio->bio.bi_status) in blkdev_bio_end_io()
322 dio->bio.bi_status = bio->bi_status; in blkdev_bio_end_io()
324 if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) { in blkdev_bio_end_io()
325 if (!dio->is_sync) { in blkdev_bio_end_io()
326 struct kiocb *iocb = dio->iocb; in blkdev_bio_end_io()
329 if (likely(!dio->bio.bi_status)) { in blkdev_bio_end_io()
330 ret = dio->size; in blkdev_bio_end_io()
331 iocb->ki_pos += ret; in blkdev_bio_end_io()
333 ret = blk_status_to_errno(dio->bio.bi_status); in blkdev_bio_end_io()
336 dio->iocb->ki_complete(iocb, ret, 0); in blkdev_bio_end_io()
337 if (dio->multi_bio) in blkdev_bio_end_io()
338 bio_put(&dio->bio); in blkdev_bio_end_io()
340 struct task_struct *waiter = dio->waiter; in blkdev_bio_end_io()
342 WRITE_ONCE(dio->waiter, NULL); in blkdev_bio_end_io()
358 struct file *file = iocb->ki_filp; in __blkdev_direct_IO()
364 bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0; in __blkdev_direct_IO()
366 loff_t pos = iocb->ki_pos; in __blkdev_direct_IO()
371 (bdev_logical_block_size(bdev) - 1)) in __blkdev_direct_IO()
372 return -EINVAL; in __blkdev_direct_IO()
377 dio->is_sync = is_sync = is_sync_kiocb(iocb); in __blkdev_direct_IO()
378 if (dio->is_sync) { in __blkdev_direct_IO()
379 dio->waiter = current; in __blkdev_direct_IO()
382 dio->iocb = iocb; in __blkdev_direct_IO()
385 dio->size = 0; in __blkdev_direct_IO()
386 dio->multi_bio = false; in __blkdev_direct_IO()
387 dio->should_dirty = is_read && iter_is_iovec(iter); in __blkdev_direct_IO()
398 bio->bi_iter.bi_sector = pos >> 9; in __blkdev_direct_IO()
399 bio->bi_write_hint = iocb->ki_hint; in __blkdev_direct_IO()
400 bio->bi_private = dio; in __blkdev_direct_IO()
401 bio->bi_end_io = blkdev_bio_end_io; in __blkdev_direct_IO()
402 bio->bi_ioprio = iocb->ki_ioprio; in __blkdev_direct_IO()
406 bio->bi_status = BLK_STS_IOERR; in __blkdev_direct_IO()
412 bio->bi_opf = REQ_OP_READ; in __blkdev_direct_IO()
413 if (dio->should_dirty) in __blkdev_direct_IO()
416 bio->bi_opf = dio_bio_write_op(iocb); in __blkdev_direct_IO()
417 task_io_account_write(bio->bi_iter.bi_size); in __blkdev_direct_IO()
420 dio->size += bio->bi_iter.bi_size; in __blkdev_direct_IO()
421 pos += bio->bi_iter.bi_size; in __blkdev_direct_IO()
427 if (iocb->ki_flags & IOCB_HIPRI) { in __blkdev_direct_IO()
435 WRITE_ONCE(iocb->ki_cookie, qc); in __blkdev_direct_IO()
439 if (!dio->multi_bio) { in __blkdev_direct_IO()
447 dio->multi_bio = true; in __blkdev_direct_IO()
448 atomic_set(&dio->ref, 2); in __blkdev_direct_IO()
450 atomic_inc(&dio->ref); in __blkdev_direct_IO()
461 return -EIOCBQUEUED; in __blkdev_direct_IO()
465 if (!READ_ONCE(dio->waiter)) in __blkdev_direct_IO()
468 if (!(iocb->ki_flags & IOCB_HIPRI) || in __blkdev_direct_IO()
475 ret = blk_status_to_errno(dio->bio.bi_status); in __blkdev_direct_IO()
477 ret = dio->size; in __blkdev_direct_IO()
479 bio_put(&dio->bio); in __blkdev_direct_IO()
508 return filemap_flush(bdev->bd_inode->i_mapping); in __sync_blockdev()
509 return filemap_write_and_wait(bdev->bd_inode->i_mapping); in __sync_blockdev()
540 * freeze_bdev -- lock a filesystem and force it into a consistent state
556 mutex_lock(&bdev->bd_fsfreeze_mutex); in freeze_bdev()
557 if (++bdev->bd_fsfreeze_count > 1) { in freeze_bdev()
559 * We don't even need to grab a reference - the first call in freeze_bdev()
566 mutex_unlock(&bdev->bd_fsfreeze_mutex); in freeze_bdev()
573 if (sb->s_op->freeze_super) in freeze_bdev()
574 error = sb->s_op->freeze_super(sb); in freeze_bdev()
579 bdev->bd_fsfreeze_count--; in freeze_bdev()
580 mutex_unlock(&bdev->bd_fsfreeze_mutex); in freeze_bdev()
586 mutex_unlock(&bdev->bd_fsfreeze_mutex); in freeze_bdev()
587 return sb; /* thaw_bdev releases s->s_umount */ in freeze_bdev()
592 * thaw_bdev -- unlock filesystem
600 int error = -EINVAL; in thaw_bdev()
602 mutex_lock(&bdev->bd_fsfreeze_mutex); in thaw_bdev()
603 if (!bdev->bd_fsfreeze_count) in thaw_bdev()
607 if (--bdev->bd_fsfreeze_count > 0) in thaw_bdev()
613 if (sb->s_op->thaw_super) in thaw_bdev()
614 error = sb->s_op->thaw_super(sb); in thaw_bdev()
618 bdev->bd_fsfreeze_count++; in thaw_bdev()
620 mutex_unlock(&bdev->bd_fsfreeze_mutex); in thaw_bdev()
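A hedged sketch of the freeze/thaw pairing these two functions implement: freeze_bdev() may return NULL (no filesystem mounted) or an ERR_PTR(), and whatever it returns is handed back to thaw_bdev():

    static int snapshot_device(struct block_device *bdev)
    {
            struct super_block *sb;

            sb = freeze_bdev(bdev);                 /* blocks new writes */
            if (IS_ERR(sb))
                    return PTR_ERR(sb);

            /* ... take the snapshot while the fs is quiescent ... */

            return thaw_bdev(bdev, sb);             /* sb may be NULL */
    }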
663 * for a block special file, file_inode(file)->i_size is zero
693 if (error == -EOPNOTSUPP) in blkdev_fsync()
701 * bdev_read_page() - Start reading a page from a block device
711 * queue full; callers should try a different route to read this page rather
719 const struct block_device_operations *ops = bdev->bd_disk->fops; in bdev_read_page()
720 int result = -EOPNOTSUPP; in bdev_read_page()
722 if (!ops->rw_page || bdev_get_integrity(bdev)) in bdev_read_page()
725 result = blk_queue_enter(bdev->bd_disk->queue, 0); in bdev_read_page()
728 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, in bdev_read_page()
730 blk_queue_exit(bdev->bd_disk->queue); in bdev_read_page()
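A hedged sketch of the documented calling convention: attempt the synchronous ->rw_page path and fall back to an ordinary bio when the driver lacks support (submit_read_bio() is a hypothetical fallback, not a real API):

    int err = bdev_read_page(bdev, sector, page);
    if (err == -EOPNOTSUPP) {
            /* no ->rw_page (or integrity enabled): use a plain REQ_OP_READ bio */
            err = submit_read_bio(bdev, sector, page);      /* hypothetical */
    }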
735 * bdev_write_page() - Start writing a page to a block device
744 * queue the page to the device), the page will still be locked. If the
745 * caller is a ->writepage implementation, it will need to unlock the page.
748 * queue full; callers should try a different route to write this page rather
757 const struct block_device_operations *ops = bdev->bd_disk->fops; in bdev_write_page()
759 if (!ops->rw_page || bdev_get_integrity(bdev)) in bdev_write_page()
760 return -EOPNOTSUPP; in bdev_write_page()
761 result = blk_queue_enter(bdev->bd_disk->queue, 0); in bdev_write_page()
766 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, in bdev_write_page()
774 blk_queue_exit(bdev->bd_disk->queue); in bdev_write_page()
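A hedged ->writepage-style sketch honoring the rule stated above: on success bdev_write_page() has unlocked the page, on failure it is still locked and the caller must unlock it (page_to_sector() is a hypothetical helper):

    static int mydev_writepage(struct page *page, struct writeback_control *wbc)
    {
            struct block_device *bdev = I_BDEV(page->mapping->host);
            int err;

            err = bdev_write_page(bdev, page_to_sector(page), page, wbc);
            if (err) {
                    /* -EOPNOTSUPP or queue full: retry via normal writeback */
                    redirty_page_for_writepage(wbc, page);
                    unlock_page(page);
            }
            return err;
    }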
779 * pseudo-fs
790 return &ei->vfs_inode; in bdev_alloc_inode()
798 static void init_once(void *foo) in init_once()
800 struct bdev_inode *ei = (struct bdev_inode *) foo; in init_once()
801 struct block_device *bdev = &ei->bdev; in init_once()
804 mutex_init(&bdev->bd_mutex); in init_once()
806 INIT_LIST_HEAD(&bdev->bd_holder_disks); in init_once()
808 bdev->bd_bdi = &noop_backing_dev_info; in init_once()
809 inode_init_once(&ei->vfs_inode); in init_once()
811 mutex_init(&bdev->bd_fsfreeze_mutex); in init_once()
816 struct block_device *bdev = &BDEV_I(inode)->bdev; in bdev_evict_inode()
817 truncate_inode_pages_final(&inode->i_data); in bdev_evict_inode()
820 /* Detach inode from wb early as bdi_put() may free bdi->wb */ in bdev_evict_inode()
822 if (bdev->bd_bdi != &noop_backing_dev_info) { in bdev_evict_inode()
823 bdi_put(bdev->bd_bdi); in bdev_evict_inode()
824 bdev->bd_bdi = &noop_backing_dev_info; in bdev_evict_inode()
840 return -ENOMEM; in bd_init_fs_context()
841 fc->s_iflags |= SB_I_CGROUPWB; in bd_init_fs_context()
842 ctx->ops = &bdev_sops; in bd_init_fs_context()
866 panic("Cannot register bdev pseudo-fs"); in bdev_cache_init()
869 panic("Cannot create bdev pseudo-fs"); in bdev_cache_init()
870 blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ in bdev_cache_init()
874 * Most likely _very_ bad one - but then it's hardly critical for small
885 return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data; in bdev_test()
890 BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data; in bdev_set()
905 bdev = &BDEV_I(inode)->bdev; in bdget()
907 if (inode->i_state & I_NEW) { in bdget()
908 spin_lock_init(&bdev->bd_size_lock); in bdget()
909 bdev->bd_contains = NULL; in bdget()
910 bdev->bd_super = NULL; in bdget()
911 bdev->bd_inode = inode; in bdget()
912 bdev->bd_part_count = 0; in bdget()
913 inode->i_mode = S_IFBLK; in bdget()
914 inode->i_rdev = dev; in bdget()
915 inode->i_bdev = bdev; in bdget()
916 inode->i_data.a_ops = &def_blk_aops; in bdget()
917 mapping_set_gfp_mask(&inode->i_data, GFP_USER); in bdget()
924 * bdgrab -- Grab a reference to an already referenced block device
929 ihold(bdev->bd_inode); in bdgrab()
944 spin_lock(&blockdev_superblock->s_inode_list_lock); in nr_blockdev_pages()
945 list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) in nr_blockdev_pages()
946 ret += inode->i_mapping->nrpages; in nr_blockdev_pages()
947 spin_unlock(&blockdev_superblock->s_inode_list_lock); in nr_blockdev_pages()
954 iput(bdev->bd_inode); in bdput()
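A hedged note on pairing: bdgrab() pins the bdev inode (it assumes the caller already holds a valid reference) and bdput() drops it, e.g.:

    bdgrab(bdev);           /* extra ref: ihold(bdev->bd_inode), never fails */
    /* ... use bdev beyond the lifetime of the original reference ... */
    bdput(bdev);            /* iput(bdev->bd_inode) */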
964 bdev = inode->i_bdev; in bd_acquire()
965 if (bdev && !inode_unhashed(bdev->bd_inode)) { in bd_acquire()
981 bdev = bdget(inode->i_rdev); in bd_acquire()
984 if (!inode->i_bdev) { in bd_acquire()
988 * So, we can access it via ->i_mapping always in bd_acquire()
992 inode->i_bdev = bdev; in bd_acquire()
993 inode->i_mapping = bdev->bd_inode->i_mapping; in bd_acquire()
1007 if (!sb_is_blkdev_sb(inode->i_sb)) in bd_forget()
1008 bdev = inode->i_bdev; in bd_forget()
1009 inode->i_bdev = NULL; in bd_forget()
1010 inode->i_mapping = &inode->i_data; in bd_forget()
1018 * bd_may_claim - test whether a block device can be claimed
1034 if (bdev->bd_holder == holder) in bd_may_claim()
1036 else if (bdev->bd_holder != NULL) in bd_may_claim()
1041 else if (whole->bd_holder == bd_may_claim) in bd_may_claim()
1043 else if (whole->bd_holder != NULL) in bd_may_claim()
1046 return true; /* is a partition of an un-held device */ in bd_may_claim()
1050 * bd_prepare_to_claim - claim a block device
1060 * 0 if @bdev can be claimed, -EBUSY otherwise.
1070 return -EBUSY; in bd_prepare_to_claim()
1074 if (whole->bd_claiming) { in bd_prepare_to_claim()
1075 wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0); in bd_prepare_to_claim()
1086 whole->bd_claiming = holder; in bd_prepare_to_claim()
1094 struct gendisk *disk = get_gendisk(bdev->bd_dev, partno); in bdev_get_gendisk()
1106 if (inode_unhashed(bdev->bd_inode)) { in bdev_get_gendisk()
1117 BUG_ON(whole->bd_claiming != holder); in bd_clear_claiming()
1118 whole->bd_claiming = NULL; in bd_clear_claiming()
1119 wake_up_bit(&whole->bd_claiming, 0); in bd_clear_claiming()
1123 * bd_finish_claiming - finish claiming of a block device
1140 whole->bd_holders++; in bd_finish_claiming()
1141 whole->bd_holder = bd_may_claim; in bd_finish_claiming()
1142 bdev->bd_holders++; in bd_finish_claiming()
1143 bdev->bd_holder = holder; in bd_finish_claiming()
1149 * bd_abort_claiming - abort claiming of a block device
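A hedged, conceptual sketch of the claim lifecycle these three helpers implement, mirroring what blkdev_get() does internally (do_open() stands in for the actual driver open and is hypothetical):

    err = bd_prepare_to_claim(bdev, whole, holder);  /* 0 or -EBUSY */
    if (err)
            return err;

    err = do_open(bdev);                             /* hypothetical */
    if (err) {
            bd_abort_claiming(bdev, whole, holder);  /* wake waiters */
            return err;
    }
    bd_finish_claiming(bdev, whole, holder);         /* record holder */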
1179 list_for_each_entry(holder, &bdev->bd_holder_disks, list) in bd_find_holder_disk()
1180 if (holder->disk == disk) in bd_find_holder_disk()
1196 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
1204 * - from "slaves" directory of the holder @disk to the claimed @bdev
1205 * - from "holders" directory of the @bdev to the holder @disk
1207 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
1210 * /sys/block/dm-0/slaves/sda --> /sys/block/sda
1211 * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
1221 * 0 on success, -errno on failure.
1228 mutex_lock(&bdev->bd_mutex); in bd_link_disk_holder()
1230 WARN_ON_ONCE(!bdev->bd_holder); in bd_link_disk_holder()
1233 if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir)) in bd_link_disk_holder()
1238 holder->refcnt++; in bd_link_disk_holder()
1244 ret = -ENOMEM; in bd_link_disk_holder()
1248 INIT_LIST_HEAD(&holder->list); in bd_link_disk_holder()
1249 holder->disk = disk; in bd_link_disk_holder()
1250 holder->refcnt = 1; in bd_link_disk_holder()
1252 ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); in bd_link_disk_holder()
1256 ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); in bd_link_disk_holder()
1263 kobject_get(bdev->bd_part->holder_dir); in bd_link_disk_holder()
1265 list_add(&holder->list, &bdev->bd_holder_disks); in bd_link_disk_holder()
1269 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); in bd_link_disk_holder()
1273 mutex_unlock(&bdev->bd_mutex); in bd_link_disk_holder()
1279 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
1292 mutex_lock(&bdev->bd_mutex); in bd_unlink_disk_holder()
1296 if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) { in bd_unlink_disk_holder()
1297 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); in bd_unlink_disk_holder()
1298 del_symlink(bdev->bd_part->holder_dir, in bd_unlink_disk_holder()
1299 &disk_to_dev(disk)->kobj); in bd_unlink_disk_holder()
1300 kobject_put(bdev->bd_part->holder_dir); in bd_unlink_disk_holder()
1301 list_del_init(&holder->list); in bd_unlink_disk_holder()
1305 mutex_unlock(&bdev->bd_mutex); in bd_unlink_disk_holder()
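A hedged sketch of how a stacking driver (device-mapper style) uses the pair: link when a table referencing @bdev is bound, unlink on teardown:

    err = bd_link_disk_holder(bdev, dm_disk);        /* creates both symlinks */
    if (err)
            goto fail;
    /* ... */
    bd_unlink_disk_holder(bdev, dm_disk);            /* removes them again */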
1311 * check_disk_size_change - checks for disk size change and adjusts bdev size.
1325 spin_lock(&bdev->bd_size_lock); in check_disk_size_change()
1327 bdev_size = i_size_read(bdev->bd_inode); in check_disk_size_change()
1332 disk->disk_name, bdev_size, disk_size); in check_disk_size_change()
1334 i_size_write(bdev->bd_inode, disk_size); in check_disk_size_change()
1336 spin_unlock(&bdev->bd_size_lock); in check_disk_size_change()
1341 disk->disk_name); in check_disk_size_change()
1346 * revalidate_disk_size - checks for disk size change and adjusts bdev size.
1362 if (disk->flags & GENHD_FL_HIDDEN) in revalidate_disk_size()
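A hedged sketch of the expected driver-side sequence when capacity changes (new_sectors is illustrative): update the gendisk, then let revalidate_disk_size() propagate it to the bdev inode via check_disk_size_change():

    set_capacity(disk, new_sectors);
    revalidate_disk_size(disk, true);       /* true: log the resize */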
1375 spin_lock(&bdev->bd_size_lock); in bd_set_nr_sectors()
1376 i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT); in bd_set_nr_sectors()
1377 spin_unlock(&bdev->bd_size_lock); in bd_set_nr_sectors()
1385 struct gendisk *disk = bdev->bd_disk; in bdev_disk_changed()
1388 lockdep_assert_held(&bdev->bd_mutex); in bdev_disk_changed()
1390 clear_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state); in bdev_disk_changed()
1401 * udisks polling for legacy ide-cdrom devices. Use the crude check in bdev_disk_changed()
1407 !(disk->flags & GENHD_FL_REMOVABLE)) in bdev_disk_changed()
1410 if (disk->fops->revalidate_disk) in bdev_disk_changed()
1411 disk->fops->revalidate_disk(disk); in bdev_disk_changed()
1418 if (ret == -EAGAIN) in bdev_disk_changed()
1425 kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); in bdev_disk_changed()
1439 * mutex_lock(part->bd_mutex)
1440 * mutex_lock_nested(whole->bd_mutex, 1)
1454 ret = -ENXIO; in __blkdev_get()
1462 ret = -ENOMEM; in __blkdev_get()
1479 mutex_lock_nested(&bdev->bd_mutex, for_part); in __blkdev_get()
1480 if (!bdev->bd_openers) { in __blkdev_get()
1482 bdev->bd_disk = disk; in __blkdev_get()
1483 bdev->bd_contains = bdev; in __blkdev_get()
1484 bdev->bd_partno = partno; in __blkdev_get()
1487 ret = -ENXIO; in __blkdev_get()
1488 bdev->bd_part = disk_get_part(disk, partno); in __blkdev_get()
1489 if (!bdev->bd_part) in __blkdev_get()
1493 if (disk->fops->open) { in __blkdev_get()
1494 ret = disk->fops->open(bdev, mode); in __blkdev_get()
1499 if (ret == -ERESTARTSYS) in __blkdev_get()
1510 * if open succeeded or failed with -ENOMEDIUM. in __blkdev_get()
1514 if (test_bit(GD_NEED_PART_SCAN, &disk->state) && in __blkdev_get()
1515 (!ret || ret == -ENOMEDIUM)) in __blkdev_get()
1516 bdev_disk_changed(bdev, ret == -ENOMEDIUM); in __blkdev_get()
1525 bdev->bd_contains = bdgrab(whole); in __blkdev_get()
1526 bdev->bd_part = disk_get_part(disk, partno); in __blkdev_get()
1527 if (!(disk->flags & GENHD_FL_UP) || in __blkdev_get()
1528 !bdev->bd_part || !bdev->bd_part->nr_sects) { in __blkdev_get()
1529 ret = -ENXIO; in __blkdev_get()
1532 bd_set_nr_sectors(bdev, bdev->bd_part->nr_sects); in __blkdev_get()
1536 if (bdev->bd_bdi == &noop_backing_dev_info) in __blkdev_get()
1537 bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info); in __blkdev_get()
1539 if (bdev->bd_contains == bdev) { in __blkdev_get()
1541 if (bdev->bd_disk->fops->open) in __blkdev_get()
1542 ret = bdev->bd_disk->fops->open(bdev, mode); in __blkdev_get()
1544 if (test_bit(GD_NEED_PART_SCAN, &disk->state) && in __blkdev_get()
1545 (!ret || ret == -ENOMEDIUM)) in __blkdev_get()
1546 bdev_disk_changed(bdev, ret == -ENOMEDIUM); in __blkdev_get()
1551 bdev->bd_openers++; in __blkdev_get()
1553 bdev->bd_part_count++; in __blkdev_get()
1563 if (claiming && (mode & FMODE_WRITE) && !bdev->bd_write_holder && in __blkdev_get()
1564 (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) { in __blkdev_get()
1565 bdev->bd_write_holder = true; in __blkdev_get()
1568 mutex_unlock(&bdev->bd_mutex); in __blkdev_get()
1581 disk_put_part(bdev->bd_part); in __blkdev_get()
1582 bdev->bd_disk = NULL; in __blkdev_get()
1583 bdev->bd_part = NULL; in __blkdev_get()
1584 if (bdev != bdev->bd_contains) in __blkdev_get()
1585 __blkdev_put(bdev->bd_contains, mode, 1); in __blkdev_get()
1586 bdev->bd_contains = NULL; in __blkdev_get()
1590 mutex_unlock(&bdev->bd_mutex); in __blkdev_get()
1604 * blkdev_get - open a block device
1620 * 0 on success, -errno on failure.
1630 ret = devcgroup_inode_permission(bdev->bd_inode, perm); in blkdev_get()
1645 * blkdev_get_by_path - open a block device by name
1659 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
1677 return ERR_PTR(-EACCES); in blkdev_get_by_path()
1685 * blkdev_get_by_dev - open a block device by device number
1693 * Use it ONLY if you really do not have anything better - i.e. when
1696 * ever need it - reconsider your API.
1704 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
1713 return ERR_PTR(-ENOMEM); in blkdev_get_by_dev()
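A hedged sketch of an exclusive open by path, as a filesystem or stacking driver would do it (the path and holder are illustrative; the mode passed to blkdev_put() must match the one used to open):

    struct block_device *bdev;

    bdev = blkdev_get_by_path("/dev/sda1",
                              FMODE_READ | FMODE_WRITE | FMODE_EXCL, holder);
    if (IS_ERR(bdev))
            return PTR_ERR(bdev);
    /* ... exclusive I/O ... */
    blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);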
1733 filp->f_flags |= O_LARGEFILE; in blkdev_open()
1735 filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC; in blkdev_open()
1737 if (filp->f_flags & O_NDELAY) in blkdev_open()
1738 filp->f_mode |= FMODE_NDELAY; in blkdev_open()
1739 if (filp->f_flags & O_EXCL) in blkdev_open()
1740 filp->f_mode |= FMODE_EXCL; in blkdev_open()
1741 if ((filp->f_flags & O_ACCMODE) == 3) in blkdev_open()
1742 filp->f_mode |= FMODE_WRITE_IOCTL; in blkdev_open()
1746 return -ENOMEM; in blkdev_open()
1748 filp->f_mapping = bdev->bd_inode->i_mapping; in blkdev_open()
1749 filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping); in blkdev_open()
1751 return blkdev_get(bdev, filp->f_mode, filp); in blkdev_open()
1756 struct gendisk *disk = bdev->bd_disk; in __blkdev_put()
1766 if (bdev->bd_openers == 1) in __blkdev_put()
1769 mutex_lock_nested(&bdev->bd_mutex, for_part); in __blkdev_put()
1771 bdev->bd_part_count--; in __blkdev_put()
1773 if (!--bdev->bd_openers) { in __blkdev_put()
1774 WARN_ON_ONCE(bdev->bd_holders); in __blkdev_put()
1780 if (bdev->bd_contains == bdev) { in __blkdev_put()
1781 if (disk->fops->release) in __blkdev_put()
1782 disk->fops->release(disk, mode); in __blkdev_put()
1784 if (!bdev->bd_openers) { in __blkdev_put()
1785 disk_put_part(bdev->bd_part); in __blkdev_put()
1786 bdev->bd_part = NULL; in __blkdev_put()
1787 bdev->bd_disk = NULL; in __blkdev_put()
1788 if (bdev != bdev->bd_contains) in __blkdev_put()
1789 victim = bdev->bd_contains; in __blkdev_put()
1790 bdev->bd_contains = NULL; in __blkdev_put()
1794 mutex_unlock(&bdev->bd_mutex); in __blkdev_put()
1802 mutex_lock(&bdev->bd_mutex); in blkdev_put()
1814 WARN_ON_ONCE(--bdev->bd_holders < 0); in blkdev_put()
1815 WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0); in blkdev_put()
1818 if ((bdev_free = !bdev->bd_holders)) in blkdev_put()
1819 bdev->bd_holder = NULL; in blkdev_put()
1820 if (!bdev->bd_contains->bd_holders) in blkdev_put()
1821 bdev->bd_contains->bd_holder = NULL; in blkdev_put()
1829 if (bdev_free && bdev->bd_write_holder) { in blkdev_put()
1830 disk_unblock_events(bdev->bd_disk); in blkdev_put()
1831 bdev->bd_write_holder = false; in blkdev_put()
1838 * from userland - e.g. eject(1). in blkdev_put()
1840 disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE); in blkdev_put()
1842 mutex_unlock(&bdev->bd_mutex); in blkdev_put()
1851 blkdev_put(bdev, filp->f_mode); in blkdev_close()
1858 fmode_t mode = file->f_mode; in block_ioctl()
1864 if (file->f_flags & O_NDELAY) in block_ioctl()
1881 struct file *file = iocb->ki_filp; in blkdev_write_iter()
1888 return -EPERM; in blkdev_write_iter()
1890 if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev)) in blkdev_write_iter()
1891 return -ETXTBSY; in blkdev_write_iter()
1896 if (iocb->ki_pos >= size) in blkdev_write_iter()
1897 return -ENOSPC; in blkdev_write_iter()
1899 if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT) in blkdev_write_iter()
1900 return -EOPNOTSUPP; in blkdev_write_iter()
1902 iov_iter_truncate(from, size - iocb->ki_pos); in blkdev_write_iter()
1915 struct file *file = iocb->ki_filp; in blkdev_read_iter()
1918 loff_t pos = iocb->ki_pos; in blkdev_read_iter()
1923 size -= pos; in blkdev_read_iter()
1935 struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super; in blkdev_releasepage()
1937 if (super && super->s_op->bdev_try_to_free_page) in blkdev_releasepage()
1938 return super->s_op->bdev_try_to_free_page(super, page, wait); in blkdev_releasepage()
1970 loff_t end = start + len - 1; in blkdev_fallocate()
1976 return -EOPNOTSUPP; in blkdev_fallocate()
1979 isize = i_size_read(bdev->bd_inode); in blkdev_fallocate()
1981 return -EINVAL; in blkdev_fallocate()
1984 len = isize - start; in blkdev_fallocate()
1985 end = start + len - 1; in blkdev_fallocate()
1987 return -EINVAL; in blkdev_fallocate()
1993 if ((start | len) & (bdev_logical_block_size(bdev) - 1)) in blkdev_fallocate()
1994 return -EINVAL; in blkdev_fallocate()
1997 error = truncate_bdev_range(bdev, file->f_mode, start, end); in blkdev_fallocate()
2016 return -EOPNOTSUPP; in blkdev_fallocate()
2023 * the caller will be given -EBUSY. The third argument is in blkdev_fallocate()
2026 return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping, in blkdev_fallocate()
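A hedged userspace sketch of what reaches blkdev_fallocate(): fallocate(2) on a block device must keep FALLOC_FL_KEEP_SIZE set, and offset/length must be logical-block aligned or -EINVAL comes back:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/falloc.h>

    /* discard a range on an open block device */
    static int discard_range(int fd, off_t off, off_t len)
    {
            return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                             off, len);
    }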
2050 * lookup_bdev - lookup a struct block_device by name
2065 return ERR_PTR(-EINVAL); in lookup_bdev()
2072 error = -ENOTBLK; in lookup_bdev()
2073 if (!S_ISBLK(inode->i_mode)) in lookup_bdev()
2075 error = -EACCES; in lookup_bdev()
2078 error = -ENOMEM; in lookup_bdev()
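A hedged sketch of the lookup_bdev() contract: it resolves a path to a referenced block_device without opening it, and the reference is dropped with bdput():

    struct block_device *bdev;

    bdev = lookup_bdev("/dev/loop0");       /* path is illustrative */
    if (IS_ERR(bdev))
            return PTR_ERR(bdev);
    /* ... inspect bdev->bd_dev, etc. ... */
    bdput(bdev);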
2100 * under us (->put_super runs with the write lock in __invalidate_device()
2116 spin_lock(&blockdev_superblock->s_inode_list_lock); in iterate_bdevs()
2117 list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) { in iterate_bdevs()
2118 struct address_space *mapping = inode->i_mapping; in iterate_bdevs()
2121 spin_lock(&inode->i_lock); in iterate_bdevs()
2122 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) || in iterate_bdevs()
2123 mapping->nrpages == 0) { in iterate_bdevs()
2124 spin_unlock(&inode->i_lock); in iterate_bdevs()
2128 spin_unlock(&inode->i_lock); in iterate_bdevs()
2129 spin_unlock(&blockdev_superblock->s_inode_list_lock); in iterate_bdevs()
2142 mutex_lock(&bdev->bd_mutex); in iterate_bdevs()
2143 if (bdev->bd_openers) in iterate_bdevs()
2145 mutex_unlock(&bdev->bd_mutex); in iterate_bdevs()
2147 spin_lock(&blockdev_superblock->s_inode_list_lock); in iterate_bdevs()
2149 spin_unlock(&blockdev_superblock->s_inode_list_lock); in iterate_bdevs()