1f21e6e18SChristoph Hellwig // SPDX-License-Identifier: GPL-2.0-only 21da177e4SLinus Torvalds /* 3f21e6e18SChristoph Hellwig * Copyright 1993 by Theodore Ts'o. 41da177e4SLinus Torvalds */ 51da177e4SLinus Torvalds #include <linux/module.h> 61da177e4SLinus Torvalds #include <linux/moduleparam.h> 71da177e4SLinus Torvalds #include <linux/sched.h> 81da177e4SLinus Torvalds #include <linux/fs.h> 94ee60ec1SMatthew Wilcox (Oracle) #include <linux/pagemap.h> 101da177e4SLinus Torvalds #include <linux/file.h> 111da177e4SLinus Torvalds #include <linux/stat.h> 121da177e4SLinus Torvalds #include <linux/errno.h> 131da177e4SLinus Torvalds #include <linux/major.h> 141da177e4SLinus Torvalds #include <linux/wait.h> 151da177e4SLinus Torvalds #include <linux/blkpg.h> 161da177e4SLinus Torvalds #include <linux/init.h> 171da177e4SLinus Torvalds #include <linux/swap.h> 181da177e4SLinus Torvalds #include <linux/slab.h> 19863d5b82SDavid Howells #include <linux/compat.h> 201da177e4SLinus Torvalds #include <linux/suspend.h> 2183144186SRafael J. Wysocki #include <linux/freezer.h> 222a48fc0aSArnd Bergmann #include <linux/mutex.h> 231da177e4SLinus Torvalds #include <linux/writeback.h> 241da177e4SLinus Torvalds #include <linux/completion.h> 251da177e4SLinus Torvalds #include <linux/highmem.h> 26d6b29d7cSJens Axboe #include <linux/splice.h> 27ee862730SMilan Broz #include <linux/sysfs.h> 28770fe30aSKay Sievers #include <linux/miscdevice.h> 29dfaa2ef6SLukas Czerner #include <linux/falloc.h> 30283e7e5dSAl Viro #include <linux/uio.h> 31d9a08a9eSAdam Manzanares #include <linux/ioprio.h> 32db6638d7SDennis Zhou #include <linux/blk-cgroup.h> 33c74d40e8SDan Schatzberg #include <linux/sched/mm.h> 3406582bc8SMing Lei #include <linux/statfs.h> 357c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 36754d9679SChristoph Hellwig #include <linux/blk-mq.h> 37754d9679SChristoph Hellwig #include <linux/spinlock.h> 38754d9679SChristoph Hellwig #include <uapi/linux/loop.h> 39754d9679SChristoph Hellwig 40754d9679SChristoph Hellwig /* Possible states of device */ 41754d9679SChristoph Hellwig enum { 42754d9679SChristoph Hellwig Lo_unbound, 43754d9679SChristoph Hellwig Lo_bound, 44754d9679SChristoph Hellwig Lo_rundown, 45754d9679SChristoph Hellwig Lo_deleting, 46754d9679SChristoph Hellwig }; 47754d9679SChristoph Hellwig 48754d9679SChristoph Hellwig struct loop_device { 49754d9679SChristoph Hellwig int lo_number; 50754d9679SChristoph Hellwig loff_t lo_offset; 51754d9679SChristoph Hellwig loff_t lo_sizelimit; 52754d9679SChristoph Hellwig int lo_flags; 53754d9679SChristoph Hellwig char lo_file_name[LO_NAME_SIZE]; 54754d9679SChristoph Hellwig 55754d9679SChristoph Hellwig struct file *lo_backing_file; 56f4774e92SChristoph Hellwig unsigned int lo_min_dio_size; 57754d9679SChristoph Hellwig struct block_device *lo_device; 58754d9679SChristoph Hellwig 59754d9679SChristoph Hellwig gfp_t old_gfp_mask; 60754d9679SChristoph Hellwig 61754d9679SChristoph Hellwig spinlock_t lo_lock; 62754d9679SChristoph Hellwig int lo_state; 63754d9679SChristoph Hellwig spinlock_t lo_work_lock; 64754d9679SChristoph Hellwig struct workqueue_struct *workqueue; 65754d9679SChristoph Hellwig struct work_struct rootcg_work; 66754d9679SChristoph Hellwig struct list_head rootcg_cmd_list; 67754d9679SChristoph Hellwig struct list_head idle_worker_list; 68754d9679SChristoph Hellwig struct rb_root worker_tree; 69754d9679SChristoph Hellwig struct timer_list timer; 70754d9679SChristoph Hellwig bool sysfs_inited; 71754d9679SChristoph Hellwig 72754d9679SChristoph Hellwig struct request_queue *lo_queue; 73754d9679SChristoph Hellwig struct blk_mq_tag_set tag_set; 74754d9679SChristoph Hellwig struct gendisk *lo_disk; 75754d9679SChristoph Hellwig struct mutex lo_mutex; 76754d9679SChristoph Hellwig bool idr_visible; 77754d9679SChristoph Hellwig }; 78754d9679SChristoph Hellwig 79754d9679SChristoph Hellwig struct loop_cmd { 80754d9679SChristoph Hellwig struct list_head list_entry; 81754d9679SChristoph Hellwig bool use_aio; /* use AIO interface to handle I/O */ 82754d9679SChristoph Hellwig atomic_t ref; /* only for aio */ 83754d9679SChristoph Hellwig long ret; 84754d9679SChristoph Hellwig struct kiocb iocb; 85754d9679SChristoph Hellwig struct bio_vec *bvec; 86754d9679SChristoph Hellwig struct cgroup_subsys_state *blkcg_css; 87754d9679SChristoph Hellwig struct cgroup_subsys_state *memcg_css; 88754d9679SChristoph Hellwig }; 891da177e4SLinus Torvalds 9087579e9bSDan Schatzberg #define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ) 91e152a05fSBart Van Assche #define LOOP_DEFAULT_HW_Q_DEPTH 128 9287579e9bSDan Schatzberg 9334dd82afSKay Sievers static DEFINE_IDR(loop_index_idr); 94310ca162STetsuo Handa static DEFINE_MUTEX(loop_ctl_mutex); 953ce6e1f6STetsuo Handa static DEFINE_MUTEX(loop_validate_mutex); 963ce6e1f6STetsuo Handa 973ce6e1f6STetsuo Handa /** 983ce6e1f6STetsuo Handa * loop_global_lock_killable() - take locks for safe loop_validate_file() test 993ce6e1f6STetsuo Handa * 1003ce6e1f6STetsuo Handa * @lo: struct loop_device 1013ce6e1f6STetsuo Handa * @global: true if @lo is about to bind another "struct loop_device", false otherwise 1023ce6e1f6STetsuo Handa * 1033ce6e1f6STetsuo Handa * Returns 0 on success, -EINTR otherwise. 1043ce6e1f6STetsuo Handa * 1053ce6e1f6STetsuo Handa * Since loop_validate_file() traverses on other "struct loop_device" if 1063ce6e1f6STetsuo Handa * is_loop_device() is true, we need a global lock for serializing concurrent 1073ce6e1f6STetsuo Handa * loop_configure()/loop_change_fd()/__loop_clr_fd() calls. 1083ce6e1f6STetsuo Handa */ 1093ce6e1f6STetsuo Handa static int loop_global_lock_killable(struct loop_device *lo, bool global) 1103ce6e1f6STetsuo Handa { 1113ce6e1f6STetsuo Handa int err; 1123ce6e1f6STetsuo Handa 1133ce6e1f6STetsuo Handa if (global) { 1143ce6e1f6STetsuo Handa err = mutex_lock_killable(&loop_validate_mutex); 1153ce6e1f6STetsuo Handa if (err) 1163ce6e1f6STetsuo Handa return err; 1173ce6e1f6STetsuo Handa } 1183ce6e1f6STetsuo Handa err = mutex_lock_killable(&lo->lo_mutex); 1193ce6e1f6STetsuo Handa if (err && global) 1203ce6e1f6STetsuo Handa mutex_unlock(&loop_validate_mutex); 1213ce6e1f6STetsuo Handa return err; 1223ce6e1f6STetsuo Handa } 1233ce6e1f6STetsuo Handa 1243ce6e1f6STetsuo Handa /** 1253ce6e1f6STetsuo Handa * loop_global_unlock() - release locks taken by loop_global_lock_killable() 1263ce6e1f6STetsuo Handa * 1273ce6e1f6STetsuo Handa * @lo: struct loop_device 1283ce6e1f6STetsuo Handa * @global: true if @lo was about to bind another "struct loop_device", false otherwise 1293ce6e1f6STetsuo Handa */ 1303ce6e1f6STetsuo Handa static void loop_global_unlock(struct loop_device *lo, bool global) 1313ce6e1f6STetsuo Handa { 1323ce6e1f6STetsuo Handa mutex_unlock(&lo->lo_mutex); 1333ce6e1f6STetsuo Handa if (global) 1343ce6e1f6STetsuo Handa mutex_unlock(&loop_validate_mutex); 1353ce6e1f6STetsuo Handa } 1361da177e4SLinus Torvalds 137476a4813SLaurent Vivier static int max_part; 138476a4813SLaurent Vivier static int part_shift; 139476a4813SLaurent Vivier 1407035b5dfSDmitry Monakhov static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file) 1411da177e4SLinus Torvalds { 142b7a1da69SGuo Chao loff_t loopsize; 1431da177e4SLinus Torvalds 1441da177e4SLinus Torvalds /* Compute loopsize in bytes */ 145b7a1da69SGuo Chao loopsize = i_size_read(file->f_mapping->host); 146b7a1da69SGuo Chao if (offset > 0) 147b7a1da69SGuo Chao loopsize -= offset; 148b7a1da69SGuo Chao /* offset is beyond i_size, weird but possible */ 1497035b5dfSDmitry Monakhov if (loopsize < 0) 1507035b5dfSDmitry Monakhov return 0; 1511da177e4SLinus Torvalds 1527035b5dfSDmitry Monakhov if (sizelimit > 0 && sizelimit < loopsize) 1537035b5dfSDmitry Monakhov loopsize = sizelimit; 1541da177e4SLinus Torvalds /* 1551da177e4SLinus Torvalds * Unfortunately, if we want to do I/O on the device, 1561da177e4SLinus Torvalds * the number of 512-byte sectors has to fit into a sector_t. 1571da177e4SLinus Torvalds */ 1581da177e4SLinus Torvalds return loopsize >> 9; 1591da177e4SLinus Torvalds } 1601da177e4SLinus Torvalds 1617035b5dfSDmitry Monakhov static loff_t get_loop_size(struct loop_device *lo, struct file *file) 1621da177e4SLinus Torvalds { 1637035b5dfSDmitry Monakhov return get_size(lo->lo_offset, lo->lo_sizelimit, file); 1647035b5dfSDmitry Monakhov } 1657035b5dfSDmitry Monakhov 166baa7d536SChristoph Hellwig /* 167baa7d536SChristoph Hellwig * We support direct I/O only if lo_offset is aligned with the logical I/O size 168baa7d536SChristoph Hellwig * of backing device, and the logical block size of loop is bigger than that of 169baa7d536SChristoph Hellwig * the backing device. 170baa7d536SChristoph Hellwig */ 171781fc49aSChristoph Hellwig static bool lo_can_use_dio(struct loop_device *lo) 1722e5ab5f3SMing Lei { 173781fc49aSChristoph Hellwig if (!(lo->lo_backing_file->f_mode & FMODE_CAN_ODIRECT)) 174781fc49aSChristoph Hellwig return false; 175f4774e92SChristoph Hellwig if (queue_logical_block_size(lo->lo_queue) < lo->lo_min_dio_size) 176f4774e92SChristoph Hellwig return false; 177f4774e92SChristoph Hellwig if (lo->lo_offset & (lo->lo_min_dio_size - 1)) 178f4774e92SChristoph Hellwig return false; 179781fc49aSChristoph Hellwig return true; 180781fc49aSChristoph Hellwig } 1812e5ab5f3SMing Lei 182afd69d5cSChristoph Hellwig /* 183afd69d5cSChristoph Hellwig * Direct I/O can be enabled either by using an O_DIRECT file descriptor, or by 184afd69d5cSChristoph Hellwig * passing in the LO_FLAGS_DIRECT_IO flag from userspace. It will be silently 185afd69d5cSChristoph Hellwig * disabled when the device block size is too small or the offset is unaligned. 186afd69d5cSChristoph Hellwig * 187afd69d5cSChristoph Hellwig * loop_get_status will always report the effective LO_FLAGS_DIRECT_IO flag and 188afd69d5cSChristoph Hellwig * not the originally passed in one. 189afd69d5cSChristoph Hellwig */ 1903a693110SChristoph Hellwig static inline void loop_update_dio(struct loop_device *lo) 191781fc49aSChristoph Hellwig { 1920cd719aaSChristoph Hellwig lockdep_assert_held(&lo->lo_mutex); 1930cd719aaSChristoph Hellwig WARN_ON_ONCE(lo->lo_state == Lo_bound && 1940cd719aaSChristoph Hellwig lo->lo_queue->mq_freeze_depth == 0); 1950cd719aaSChristoph Hellwig 196afd69d5cSChristoph Hellwig if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !lo_can_use_dio(lo)) 1972e5ab5f3SMing Lei lo->lo_flags &= ~LO_FLAGS_DIRECT_IO; 1982e5ab5f3SMing Lei } 1992e5ab5f3SMing Lei 2005795b6f5SMartijn Coenen /** 2015795b6f5SMartijn Coenen * loop_set_size() - sets device size and notifies userspace 2025795b6f5SMartijn Coenen * @lo: struct loop_device to set the size for 2035795b6f5SMartijn Coenen * @size: new size of the loop device 2045795b6f5SMartijn Coenen * 2055795b6f5SMartijn Coenen * Callers must validate that the size passed into this function fits into 2065795b6f5SMartijn Coenen * a sector_t, eg using loop_validate_size() 2075795b6f5SMartijn Coenen */ 2085795b6f5SMartijn Coenen static void loop_set_size(struct loop_device *lo, loff_t size) 2095795b6f5SMartijn Coenen { 210449f4ec9SChristoph Hellwig if (!set_capacity_and_notify(lo->lo_disk, size)) 2113b4f85d0SChristoph Hellwig kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); 2121da177e4SLinus Torvalds } 2131da177e4SLinus Torvalds 214aa4d8616SChristoph Hellwig static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) 2151da177e4SLinus Torvalds { 216aa4d8616SChristoph Hellwig struct iov_iter i; 2171da177e4SLinus Torvalds ssize_t bw; 218283e7e5dSAl Viro 219de4eda9dSAl Viro iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len); 2201da177e4SLinus Torvalds 221abbb6589SChristoph Hellwig bw = vfs_iter_write(file, &i, ppos, 0); 222aa4d8616SChristoph Hellwig 223aa4d8616SChristoph Hellwig if (likely(bw == bvec->bv_len)) 2241da177e4SLinus Torvalds return 0; 225aa4d8616SChristoph Hellwig 226aa4d8616SChristoph Hellwig printk_ratelimited(KERN_ERR 227aa4d8616SChristoph Hellwig "loop: Write error at byte offset %llu, length %i.\n", 228aa4d8616SChristoph Hellwig (unsigned long long)*ppos, bvec->bv_len); 2291da177e4SLinus Torvalds if (bw >= 0) 2301da177e4SLinus Torvalds bw = -EIO; 2311da177e4SLinus Torvalds return bw; 2321da177e4SLinus Torvalds } 2331da177e4SLinus Torvalds 234aa4d8616SChristoph Hellwig static int lo_write_simple(struct loop_device *lo, struct request *rq, 235aa4d8616SChristoph Hellwig loff_t pos) 2361da177e4SLinus Torvalds { 237aa4d8616SChristoph Hellwig struct bio_vec bvec; 238aa4d8616SChristoph Hellwig struct req_iterator iter; 239aa4d8616SChristoph Hellwig int ret = 0; 240aa4d8616SChristoph Hellwig 241aa4d8616SChristoph Hellwig rq_for_each_segment(bvec, rq, iter) { 242aa4d8616SChristoph Hellwig ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos); 243aa4d8616SChristoph Hellwig if (ret < 0) 244aa4d8616SChristoph Hellwig break; 2451da177e4SLinus Torvalds cond_resched(); 2461da177e4SLinus Torvalds } 2471da177e4SLinus Torvalds 248aa4d8616SChristoph Hellwig return ret; 249aa4d8616SChristoph Hellwig } 250aa4d8616SChristoph Hellwig 251aa4d8616SChristoph Hellwig static int lo_read_simple(struct loop_device *lo, struct request *rq, 252aa4d8616SChristoph Hellwig loff_t pos) 2531da177e4SLinus Torvalds { 2547988613bSKent Overstreet struct bio_vec bvec; 25530112013SMing Lei struct req_iterator iter; 256aa4d8616SChristoph Hellwig struct iov_iter i; 257aa4d8616SChristoph Hellwig ssize_t len; 2581da177e4SLinus Torvalds 25930112013SMing Lei rq_for_each_segment(bvec, rq, iter) { 260de4eda9dSAl Viro iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len); 26118e9710eSChristoph Hellwig len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); 262aa4d8616SChristoph Hellwig if (len < 0) 263aa4d8616SChristoph Hellwig return len; 264306df071SDave Young 265aa4d8616SChristoph Hellwig flush_dcache_page(bvec.bv_page); 266aa4d8616SChristoph Hellwig 267aa4d8616SChristoph Hellwig if (len != bvec.bv_len) { 26830112013SMing Lei struct bio *bio; 26930112013SMing Lei 27030112013SMing Lei __rq_for_each_bio(bio, rq) 271306df071SDave Young zero_fill_bio(bio); 2721da177e4SLinus Torvalds break; 273306df071SDave Young } 274aa4d8616SChristoph Hellwig cond_resched(); 2751da177e4SLinus Torvalds } 276aa4d8616SChristoph Hellwig 277306df071SDave Young return 0; 2781da177e4SLinus Torvalds } 2791da177e4SLinus Torvalds 2805f75e081SCyril Hrubis static void loop_clear_limits(struct loop_device *lo, int mode) 2815f75e081SCyril Hrubis { 2825f75e081SCyril Hrubis struct queue_limits lim = queue_limits_start_update(lo->lo_queue); 2835f75e081SCyril Hrubis 2845f75e081SCyril Hrubis if (mode & FALLOC_FL_ZERO_RANGE) 2855f75e081SCyril Hrubis lim.max_write_zeroes_sectors = 0; 2865f75e081SCyril Hrubis 2875f75e081SCyril Hrubis if (mode & FALLOC_FL_PUNCH_HOLE) { 2885f75e081SCyril Hrubis lim.max_hw_discard_sectors = 0; 2895f75e081SCyril Hrubis lim.discard_granularity = 0; 2905f75e081SCyril Hrubis } 2915f75e081SCyril Hrubis 292b03732a9SChristoph Hellwig /* 293b03732a9SChristoph Hellwig * XXX: this updates the queue limits without freezing the queue, which 294b03732a9SChristoph Hellwig * is against the locking protocol and dangerous. But we can't just 295b03732a9SChristoph Hellwig * freeze the queue as we're inside the ->queue_rq method here. So this 296b03732a9SChristoph Hellwig * should move out into a workqueue unless we get the file operations to 297b03732a9SChristoph Hellwig * advertise if they support specific fallocate operations. 298b03732a9SChristoph Hellwig */ 2995f75e081SCyril Hrubis queue_limits_commit_update(lo->lo_queue, &lim); 3005f75e081SCyril Hrubis } 3015f75e081SCyril Hrubis 302efcfec57SDarrick J. Wong static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos, 303efcfec57SDarrick J. Wong int mode) 304cf655d95SMing Lei { 305cf655d95SMing Lei /* 306efcfec57SDarrick J. Wong * We use fallocate to manipulate the space mappings used by the image 30747e96246SChristoph Hellwig * a.k.a. discard/zerorange. 308cf655d95SMing Lei */ 309cf655d95SMing Lei struct file *file = lo->lo_backing_file; 310cf655d95SMing Lei int ret; 311cf655d95SMing Lei 312efcfec57SDarrick J. Wong mode |= FALLOC_FL_KEEP_SIZE; 313efcfec57SDarrick J. Wong 31470200574SChristoph Hellwig if (!bdev_max_discard_sectors(lo->lo_device)) 31570200574SChristoph Hellwig return -EOPNOTSUPP; 316cf655d95SMing Lei 317cf655d95SMing Lei ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq)); 318cf655d95SMing Lei if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP)) 31970200574SChristoph Hellwig return -EIO; 3205f75e081SCyril Hrubis 3215f75e081SCyril Hrubis /* 3225f75e081SCyril Hrubis * We initially configure the limits in a hope that fallocate is 3235f75e081SCyril Hrubis * supported and clear them here if that turns out not to be true. 3245f75e081SCyril Hrubis */ 3255f75e081SCyril Hrubis if (unlikely(ret == -EOPNOTSUPP)) 3265f75e081SCyril Hrubis loop_clear_limits(lo, mode); 3275f75e081SCyril Hrubis 328cf655d95SMing Lei return ret; 329cf655d95SMing Lei } 330cf655d95SMing Lei 331cf655d95SMing Lei static int lo_req_flush(struct loop_device *lo, struct request *rq) 332cf655d95SMing Lei { 3339c64e38cSChaitanya Kulkarni int ret = vfs_fsync(lo->lo_backing_file, 0); 334cf655d95SMing Lei if (unlikely(ret && ret != -EINVAL)) 335cf655d95SMing Lei ret = -EIO; 336cf655d95SMing Lei 337cf655d95SMing Lei return ret; 338cf655d95SMing Lei } 339cf655d95SMing Lei 340fe2cb290SChristoph Hellwig static void lo_complete_rq(struct request *rq) 341bc07c10aSMing Lei { 342fe2cb290SChristoph Hellwig struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); 343f9de14bcSJens Axboe blk_status_t ret = BLK_STS_OK; 344bc07c10aSMing Lei 345f9de14bcSJens Axboe if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) || 346f9de14bcSJens Axboe req_op(rq) != REQ_OP_READ) { 347f9de14bcSJens Axboe if (cmd->ret < 0) 3488cd55087SEvan Green ret = errno_to_blk_status(cmd->ret); 349f9de14bcSJens Axboe goto end_io; 350bc07c10aSMing Lei } 351fe2cb290SChristoph Hellwig 352f9de14bcSJens Axboe /* 353f9de14bcSJens Axboe * Short READ - if we got some data, advance our request and 354f9de14bcSJens Axboe * retry it. If we got no data, end the rest with EIO. 355f9de14bcSJens Axboe */ 356f9de14bcSJens Axboe if (cmd->ret) { 357f9de14bcSJens Axboe blk_update_request(rq, BLK_STS_OK, cmd->ret); 358f9de14bcSJens Axboe cmd->ret = 0; 359f9de14bcSJens Axboe blk_mq_requeue_request(rq, true); 360f9de14bcSJens Axboe } else { 361f9de14bcSJens Axboe if (cmd->use_aio) { 362f9de14bcSJens Axboe struct bio *bio = rq->bio; 363f9de14bcSJens Axboe 364f9de14bcSJens Axboe while (bio) { 365f9de14bcSJens Axboe zero_fill_bio(bio); 366f9de14bcSJens Axboe bio = bio->bi_next; 367f9de14bcSJens Axboe } 368f9de14bcSJens Axboe } 369f9de14bcSJens Axboe ret = BLK_STS_IOERR; 370f9de14bcSJens Axboe end_io: 371f9de14bcSJens Axboe blk_mq_end_request(rq, ret); 372f9de14bcSJens Axboe } 373bc07c10aSMing Lei } 374bc07c10aSMing Lei 37592d77332SShaohua Li static void lo_rw_aio_do_completion(struct loop_cmd *cmd) 37692d77332SShaohua Li { 3771894e916SJens Axboe struct request *rq = blk_mq_rq_from_pdu(cmd); 3781894e916SJens Axboe 37992d77332SShaohua Li if (!atomic_dec_and_test(&cmd->ref)) 38092d77332SShaohua Li return; 38192d77332SShaohua Li kfree(cmd->bvec); 38292d77332SShaohua Li cmd->bvec = NULL; 38315f73f5bSChristoph Hellwig if (likely(!blk_should_fake_timeout(rq->q))) 3841894e916SJens Axboe blk_mq_complete_request(rq); 38592d77332SShaohua Li } 38692d77332SShaohua Li 3876b19b766SJens Axboe static void lo_rw_aio_complete(struct kiocb *iocb, long ret) 388bc07c10aSMing Lei { 389bc07c10aSMing Lei struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb); 390bc07c10aSMing Lei 391fe2cb290SChristoph Hellwig cmd->ret = ret; 39292d77332SShaohua Li lo_rw_aio_do_completion(cmd); 393bc07c10aSMing Lei } 394bc07c10aSMing Lei 395bc07c10aSMing Lei static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, 396de4eda9dSAl Viro loff_t pos, int rw) 397bc07c10aSMing Lei { 398bc07c10aSMing Lei struct iov_iter iter; 39986af5952SMing Lei struct req_iterator rq_iter; 400bc07c10aSMing Lei struct bio_vec *bvec; 4011894e916SJens Axboe struct request *rq = blk_mq_rq_from_pdu(cmd); 40240326d8aSShaohua Li struct bio *bio = rq->bio; 403bc07c10aSMing Lei struct file *file = lo->lo_backing_file; 40486af5952SMing Lei struct bio_vec tmp; 40540326d8aSShaohua Li unsigned int offset; 40686af5952SMing Lei int nr_bvec = 0; 407bc07c10aSMing Lei int ret; 408bc07c10aSMing Lei 40986af5952SMing Lei rq_for_each_bvec(tmp, rq, rq_iter) 41086af5952SMing Lei nr_bvec++; 411bc07c10aSMing Lei 41286af5952SMing Lei if (rq->bio != rq->biotail) { 41386af5952SMing Lei 41486af5952SMing Lei bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec), 4156da2ec56SKees Cook GFP_NOIO); 41640326d8aSShaohua Li if (!bvec) 41740326d8aSShaohua Li return -EIO; 41840326d8aSShaohua Li cmd->bvec = bvec; 41940326d8aSShaohua Li 420a7297a6aSMing Lei /* 42140326d8aSShaohua Li * The bios of the request may be started from the middle of 42240326d8aSShaohua Li * the 'bvec' because of bio splitting, so we can't directly 42386af5952SMing Lei * copy bio->bi_iov_vec to new bvec. The rq_for_each_bvec 42440326d8aSShaohua Li * API will take care of all details for us. 425a7297a6aSMing Lei */ 42686af5952SMing Lei rq_for_each_bvec(tmp, rq, rq_iter) { 42740326d8aSShaohua Li *bvec = tmp; 42840326d8aSShaohua Li bvec++; 42940326d8aSShaohua Li } 43040326d8aSShaohua Li bvec = cmd->bvec; 43140326d8aSShaohua Li offset = 0; 43240326d8aSShaohua Li } else { 43340326d8aSShaohua Li /* 43440326d8aSShaohua Li * Same here, this bio may be started from the middle of the 43540326d8aSShaohua Li * 'bvec' because of bio splitting, so offset from the bvec 43640326d8aSShaohua Li * must be passed to iov iterator 43740326d8aSShaohua Li */ 43840326d8aSShaohua Li offset = bio->bi_iter.bi_bvec_done; 43940326d8aSShaohua Li bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); 44040326d8aSShaohua Li } 44192d77332SShaohua Li atomic_set(&cmd->ref, 2); 44240326d8aSShaohua Li 443b6207430SChristoph Hellwig iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq)); 44440326d8aSShaohua Li iter.iov_offset = offset; 445bc07c10aSMing Lei 446bc07c10aSMing Lei cmd->iocb.ki_pos = pos; 447bc07c10aSMing Lei cmd->iocb.ki_filp = file; 448bc07c10aSMing Lei cmd->iocb.ki_complete = lo_rw_aio_complete; 449bc07c10aSMing Lei cmd->iocb.ki_flags = IOCB_DIRECT; 4501fdb8188SYunlong Xing cmd->iocb.ki_ioprio = req_get_ioprio(rq); 451bc07c10aSMing Lei 452de4eda9dSAl Viro if (rw == ITER_SOURCE) 4537c98f7cbSMiklos Szeredi ret = file->f_op->write_iter(&cmd->iocb, &iter); 454bc07c10aSMing Lei else 4557c98f7cbSMiklos Szeredi ret = file->f_op->read_iter(&cmd->iocb, &iter); 456bc07c10aSMing Lei 45792d77332SShaohua Li lo_rw_aio_do_completion(cmd); 45892d77332SShaohua Li 459bc07c10aSMing Lei if (ret != -EIOCBQUEUED) 4606b19b766SJens Axboe lo_rw_aio_complete(&cmd->iocb, ret); 461bc07c10aSMing Lei return 0; 462bc07c10aSMing Lei } 463bc07c10aSMing Lei 464c1c87c2bSChristoph Hellwig static int do_req_filebacked(struct loop_device *lo, struct request *rq) 465bc07c10aSMing Lei { 466bc07c10aSMing Lei struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); 467c1c87c2bSChristoph Hellwig loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset; 468bc07c10aSMing Lei 469bc07c10aSMing Lei /* 470bc07c10aSMing Lei * lo_write_simple and lo_read_simple should have been covered 471bc07c10aSMing Lei * by io submit style function like lo_rw_aio(), one blocker 472bc07c10aSMing Lei * is that lo_read_simple() need to call flush_dcache_page after 473bc07c10aSMing Lei * the page is written from kernel, and it isn't easy to handle 474bc07c10aSMing Lei * this in io submit style function which submits all segments 475bc07c10aSMing Lei * of the req at one time. And direct read IO doesn't need to 476bc07c10aSMing Lei * run flush_dcache_page(). 477bc07c10aSMing Lei */ 478c1c87c2bSChristoph Hellwig switch (req_op(rq)) { 479c1c87c2bSChristoph Hellwig case REQ_OP_FLUSH: 480c1c87c2bSChristoph Hellwig return lo_req_flush(lo, rq); 48119372e27SChristoph Hellwig case REQ_OP_WRITE_ZEROES: 482efcfec57SDarrick J. Wong /* 483efcfec57SDarrick J. Wong * If the caller doesn't want deallocation, call zeroout to 484efcfec57SDarrick J. Wong * write zeroes the range. Otherwise, punch them out. 485efcfec57SDarrick J. Wong */ 486efcfec57SDarrick J. Wong return lo_fallocate(lo, rq, pos, 487efcfec57SDarrick J. Wong (rq->cmd_flags & REQ_NOUNMAP) ? 488efcfec57SDarrick J. Wong FALLOC_FL_ZERO_RANGE : 489efcfec57SDarrick J. Wong FALLOC_FL_PUNCH_HOLE); 490efcfec57SDarrick J. Wong case REQ_OP_DISCARD: 491efcfec57SDarrick J. Wong return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE); 492c1c87c2bSChristoph Hellwig case REQ_OP_WRITE: 49347e96246SChristoph Hellwig if (cmd->use_aio) 494de4eda9dSAl Viro return lo_rw_aio(lo, cmd, pos, ITER_SOURCE); 495c1c87c2bSChristoph Hellwig else 496bc07c10aSMing Lei return lo_write_simple(lo, rq, pos); 497c1c87c2bSChristoph Hellwig case REQ_OP_READ: 49847e96246SChristoph Hellwig if (cmd->use_aio) 499de4eda9dSAl Viro return lo_rw_aio(lo, cmd, pos, ITER_DEST); 500bc07c10aSMing Lei else 501bc07c10aSMing Lei return lo_read_simple(lo, rq, pos); 502c1c87c2bSChristoph Hellwig default: 503c1c87c2bSChristoph Hellwig WARN_ON_ONCE(1); 504c1c87c2bSChristoph Hellwig return -EIO; 505bc07c10aSMing Lei } 5061da177e4SLinus Torvalds } 5071da177e4SLinus Torvalds 5080384264eSChristoph Hellwig static void loop_reread_partitions(struct loop_device *lo) 50906f0e9e6SMing Lei { 51006f0e9e6SMing Lei int rc; 51106f0e9e6SMing Lei 5120384264eSChristoph Hellwig mutex_lock(&lo->lo_disk->open_mutex); 5130384264eSChristoph Hellwig rc = bdev_disk_changed(lo->lo_disk, false); 5140384264eSChristoph Hellwig mutex_unlock(&lo->lo_disk->open_mutex); 51506f0e9e6SMing Lei if (rc) 51606f0e9e6SMing Lei pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n", 51706f0e9e6SMing Lei __func__, lo->lo_number, lo->lo_file_name, rc); 51806f0e9e6SMing Lei } 51906f0e9e6SMing Lei 520f4774e92SChristoph Hellwig static unsigned int loop_query_min_dio_size(struct loop_device *lo) 521f4774e92SChristoph Hellwig { 522f4774e92SChristoph Hellwig struct file *file = lo->lo_backing_file; 523f4774e92SChristoph Hellwig struct block_device *sb_bdev = file->f_mapping->host->i_sb->s_bdev; 524f4774e92SChristoph Hellwig struct kstat st; 525f4774e92SChristoph Hellwig 526f4774e92SChristoph Hellwig /* 527f4774e92SChristoph Hellwig * Use the minimal dio alignment of the file system if provided. 528f4774e92SChristoph Hellwig */ 529f4774e92SChristoph Hellwig if (!vfs_getattr(&file->f_path, &st, STATX_DIOALIGN, 0) && 530f4774e92SChristoph Hellwig (st.result_mask & STATX_DIOALIGN)) 531f4774e92SChristoph Hellwig return st.dio_offset_align; 532f4774e92SChristoph Hellwig 533f4774e92SChristoph Hellwig /* 534f4774e92SChristoph Hellwig * In a perfect world this wouldn't be needed, but as of Linux 6.13 only 535f4774e92SChristoph Hellwig * a handful of file systems support the STATX_DIOALIGN flag. 536f4774e92SChristoph Hellwig */ 537f4774e92SChristoph Hellwig if (sb_bdev) 538f4774e92SChristoph Hellwig return bdev_logical_block_size(sb_bdev); 539f4774e92SChristoph Hellwig return SECTOR_SIZE; 540f4774e92SChristoph Hellwig } 541f4774e92SChristoph Hellwig 542d2ac838eSTheodore Ts'o static inline int is_loop_device(struct file *file) 543d2ac838eSTheodore Ts'o { 544d2ac838eSTheodore Ts'o struct inode *i = file->f_mapping->host; 545d2ac838eSTheodore Ts'o 5466f24784fSAl Viro return i && S_ISBLK(i->i_mode) && imajor(i) == LOOP_MAJOR; 547d2ac838eSTheodore Ts'o } 548d2ac838eSTheodore Ts'o 549d2ac838eSTheodore Ts'o static int loop_validate_file(struct file *file, struct block_device *bdev) 550d2ac838eSTheodore Ts'o { 551d2ac838eSTheodore Ts'o struct inode *inode = file->f_mapping->host; 552d2ac838eSTheodore Ts'o struct file *f = file; 553d2ac838eSTheodore Ts'o 554d2ac838eSTheodore Ts'o /* Avoid recursion */ 555d2ac838eSTheodore Ts'o while (is_loop_device(f)) { 556d2ac838eSTheodore Ts'o struct loop_device *l; 557d2ac838eSTheodore Ts'o 5583ce6e1f6STetsuo Handa lockdep_assert_held(&loop_validate_mutex); 5594e7b5671SChristoph Hellwig if (f->f_mapping->host->i_rdev == bdev->bd_dev) 560d2ac838eSTheodore Ts'o return -EBADF; 561d2ac838eSTheodore Ts'o 5624e7b5671SChristoph Hellwig l = I_BDEV(f->f_mapping->host)->bd_disk->private_data; 5633ce6e1f6STetsuo Handa if (l->lo_state != Lo_bound) 564d2ac838eSTheodore Ts'o return -EINVAL; 5653ce6e1f6STetsuo Handa /* Order wrt setting lo->lo_backing_file in loop_configure(). */ 5663ce6e1f6STetsuo Handa rmb(); 567d2ac838eSTheodore Ts'o f = l->lo_backing_file; 568d2ac838eSTheodore Ts'o } 569d2ac838eSTheodore Ts'o if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) 570d2ac838eSTheodore Ts'o return -EINVAL; 571d2ac838eSTheodore Ts'o return 0; 572d2ac838eSTheodore Ts'o } 573d2ac838eSTheodore Ts'o 574d2781648SChristoph Hellwig static void loop_assign_backing_file(struct loop_device *lo, struct file *file) 575d2781648SChristoph Hellwig { 576d2781648SChristoph Hellwig lo->lo_backing_file = file; 577d2781648SChristoph Hellwig lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping); 578d2781648SChristoph Hellwig mapping_set_gfp_mask(file->f_mapping, 579d2781648SChristoph Hellwig lo->old_gfp_mask & ~(__GFP_IO | __GFP_FS)); 580984c2ab4SChristoph Hellwig if (lo->lo_backing_file->f_flags & O_DIRECT) 581984c2ab4SChristoph Hellwig lo->lo_flags |= LO_FLAGS_DIRECT_IO; 582f4774e92SChristoph Hellwig lo->lo_min_dio_size = loop_query_min_dio_size(lo); 583d2781648SChristoph Hellwig } 584d2781648SChristoph Hellwig 5851da177e4SLinus Torvalds /* 5861da177e4SLinus Torvalds * loop_change_fd switched the backing store of a loopback device to 5871da177e4SLinus Torvalds * a new file. This is useful for operating system installers to free up 5881da177e4SLinus Torvalds * the original file and in High Availability environments to switch to 5891da177e4SLinus Torvalds * an alternative location for the content in case of server meltdown. 5901da177e4SLinus Torvalds * This can only work if the loop device is used read-only, and if the 5911da177e4SLinus Torvalds * new backing store is the same size and type as the old backing store. 5921da177e4SLinus Torvalds */ 593bb214884SAl Viro static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, 594bb214884SAl Viro unsigned int arg) 5951da177e4SLinus Torvalds { 5963ce6e1f6STetsuo Handa struct file *file = fget(arg); 5973ce6e1f6STetsuo Handa struct file *old_file; 5981e1a9cecSChristoph Hellwig unsigned int memflags; 5991da177e4SLinus Torvalds int error; 60085b0a54aSJan Kara bool partscan; 6013ce6e1f6STetsuo Handa bool is_loop; 6021da177e4SLinus Torvalds 6033ce6e1f6STetsuo Handa if (!file) 6043ce6e1f6STetsuo Handa return -EBADF; 605498ef5c7SChristoph Hellwig 606498ef5c7SChristoph Hellwig /* suppress uevents while reconfiguring the device */ 607498ef5c7SChristoph Hellwig dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); 608498ef5c7SChristoph Hellwig 6093ce6e1f6STetsuo Handa is_loop = is_loop_device(file); 6103ce6e1f6STetsuo Handa error = loop_global_lock_killable(lo, is_loop); 611c3710770SJan Kara if (error) 6123ce6e1f6STetsuo Handa goto out_putf; 6131da177e4SLinus Torvalds error = -ENXIO; 6141da177e4SLinus Torvalds if (lo->lo_state != Lo_bound) 6151dded9acSJan Kara goto out_err; 6161da177e4SLinus Torvalds 6171da177e4SLinus Torvalds /* the loop device has to be read-only */ 6181da177e4SLinus Torvalds error = -EINVAL; 6191da177e4SLinus Torvalds if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) 6201dded9acSJan Kara goto out_err; 6211da177e4SLinus Torvalds 622d2ac838eSTheodore Ts'o error = loop_validate_file(file, bdev); 623d2ac838eSTheodore Ts'o if (error) 6241dded9acSJan Kara goto out_err; 625d2ac838eSTheodore Ts'o 6261da177e4SLinus Torvalds old_file = lo->lo_backing_file; 6271da177e4SLinus Torvalds 6281da177e4SLinus Torvalds error = -EINVAL; 6291da177e4SLinus Torvalds 6301da177e4SLinus Torvalds /* size of the new backing store needs to be the same */ 6311da177e4SLinus Torvalds if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) 6321dded9acSJan Kara goto out_err; 6331da177e4SLinus Torvalds 63486947bdcSMing Lei /* 63586947bdcSMing Lei * We might switch to direct I/O mode for the loop device, write back 63686947bdcSMing Lei * all dirty data the page cache now that so that the individual I/O 63786947bdcSMing Lei * operations don't have to do that. 63886947bdcSMing Lei */ 63986947bdcSMing Lei vfs_fsync(file, 0); 64086947bdcSMing Lei 6411da177e4SLinus Torvalds /* and ... switch */ 642ab6860f6SChristoph Hellwig disk_force_media_change(lo->lo_disk); 6431e1a9cecSChristoph Hellwig memflags = blk_mq_freeze_queue(lo->lo_queue); 64443cade80SOmar Sandoval mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask); 645d2781648SChristoph Hellwig loop_assign_backing_file(lo, file); 64643cade80SOmar Sandoval loop_update_dio(lo); 6471e1a9cecSChristoph Hellwig blk_mq_unfreeze_queue(lo->lo_queue, memflags); 64885b0a54aSJan Kara partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; 6493ce6e1f6STetsuo Handa loop_global_unlock(lo, is_loop); 6503ce6e1f6STetsuo Handa 6513ce6e1f6STetsuo Handa /* 6523ce6e1f6STetsuo Handa * Flush loop_validate_file() before fput(), for l->lo_backing_file 6533ce6e1f6STetsuo Handa * might be pointing at old_file which might be the last reference. 6543ce6e1f6STetsuo Handa */ 6553ce6e1f6STetsuo Handa if (!is_loop) { 6563ce6e1f6STetsuo Handa mutex_lock(&loop_validate_mutex); 6573ce6e1f6STetsuo Handa mutex_unlock(&loop_validate_mutex); 6583ce6e1f6STetsuo Handa } 6591dded9acSJan Kara /* 6606cc8e743SPavel Tatashin * We must drop file reference outside of lo_mutex as dropping 661a8698707SChristoph Hellwig * the file ref can take open_mutex which creates circular locking 6621dded9acSJan Kara * dependency. 6631dded9acSJan Kara */ 6641dded9acSJan Kara fput(old_file); 665*0dba7a05SThomas Weißschuh dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); 66685b0a54aSJan Kara if (partscan) 6670384264eSChristoph Hellwig loop_reread_partitions(lo); 668498ef5c7SChristoph Hellwig 669498ef5c7SChristoph Hellwig error = 0; 670498ef5c7SChristoph Hellwig done: 671e7bc0010SThomas Weißschuh kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); 672498ef5c7SChristoph Hellwig return error; 6731da177e4SLinus Torvalds 6741dded9acSJan Kara out_err: 6753ce6e1f6STetsuo Handa loop_global_unlock(lo, is_loop); 6763ce6e1f6STetsuo Handa out_putf: 6771dded9acSJan Kara fput(file); 678*0dba7a05SThomas Weißschuh dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); 679498ef5c7SChristoph Hellwig goto done; 6801da177e4SLinus Torvalds } 6811da177e4SLinus Torvalds 682ee862730SMilan Broz /* loop sysfs attributes */ 683ee862730SMilan Broz 684ee862730SMilan Broz static ssize_t loop_attr_show(struct device *dev, char *page, 685ee862730SMilan Broz ssize_t (*callback)(struct loop_device *, char *)) 686ee862730SMilan Broz { 68734dd82afSKay Sievers struct gendisk *disk = dev_to_disk(dev); 68834dd82afSKay Sievers struct loop_device *lo = disk->private_data; 689ee862730SMilan Broz 69034dd82afSKay Sievers return callback(lo, page); 691ee862730SMilan Broz } 692ee862730SMilan Broz 693ee862730SMilan Broz #define LOOP_ATTR_RO(_name) \ 694ee862730SMilan Broz static ssize_t loop_attr_##_name##_show(struct loop_device *, char *); \ 695ee862730SMilan Broz static ssize_t loop_attr_do_show_##_name(struct device *d, \ 696ee862730SMilan Broz struct device_attribute *attr, char *b) \ 697ee862730SMilan Broz { \ 698ee862730SMilan Broz return loop_attr_show(d, b, loop_attr_##_name##_show); \ 699ee862730SMilan Broz } \ 700ee862730SMilan Broz static struct device_attribute loop_attr_##_name = \ 7015657a819SJoe Perches __ATTR(_name, 0444, loop_attr_do_show_##_name, NULL); 702ee862730SMilan Broz 703ee862730SMilan Broz static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) 704ee862730SMilan Broz { 705ee862730SMilan Broz ssize_t ret; 706ee862730SMilan Broz char *p = NULL; 707ee862730SMilan Broz 70805eb0f25SKay Sievers spin_lock_irq(&lo->lo_lock); 709ee862730SMilan Broz if (lo->lo_backing_file) 7109bf39ab2SMiklos Szeredi p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1); 71105eb0f25SKay Sievers spin_unlock_irq(&lo->lo_lock); 712ee862730SMilan Broz 713ee862730SMilan Broz if (IS_ERR_OR_NULL(p)) 714ee862730SMilan Broz ret = PTR_ERR(p); 715ee862730SMilan Broz else { 716ee862730SMilan Broz ret = strlen(p); 717ee862730SMilan Broz memmove(buf, p, ret); 718ee862730SMilan Broz buf[ret++] = '\n'; 719ee862730SMilan Broz buf[ret] = 0; 720ee862730SMilan Broz } 721ee862730SMilan Broz 722ee862730SMilan Broz return ret; 723ee862730SMilan Broz } 724ee862730SMilan Broz 725ee862730SMilan Broz static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf) 726ee862730SMilan Broz { 727b27824d3SChaitanya Kulkarni return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset); 728ee862730SMilan Broz } 729ee862730SMilan Broz 730ee862730SMilan Broz static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf) 731ee862730SMilan Broz { 732b27824d3SChaitanya Kulkarni return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); 733ee862730SMilan Broz } 734ee862730SMilan Broz 735ee862730SMilan Broz static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) 736ee862730SMilan Broz { 737ee862730SMilan Broz int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); 738ee862730SMilan Broz 739b27824d3SChaitanya Kulkarni return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0"); 740ee862730SMilan Broz } 741ee862730SMilan Broz 742e03c8dd1SKay Sievers static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf) 743e03c8dd1SKay Sievers { 744e03c8dd1SKay Sievers int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN); 745e03c8dd1SKay Sievers 746b27824d3SChaitanya Kulkarni return sysfs_emit(buf, "%s\n", partscan ? "1" : "0"); 747e03c8dd1SKay Sievers } 748e03c8dd1SKay Sievers 7492e5ab5f3SMing Lei static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf) 7502e5ab5f3SMing Lei { 7512e5ab5f3SMing Lei int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO); 7522e5ab5f3SMing Lei 753b27824d3SChaitanya Kulkarni return sysfs_emit(buf, "%s\n", dio ? "1" : "0"); 7542e5ab5f3SMing Lei } 7552e5ab5f3SMing Lei 756ee862730SMilan Broz LOOP_ATTR_RO(backing_file); 757ee862730SMilan Broz LOOP_ATTR_RO(offset); 758ee862730SMilan Broz LOOP_ATTR_RO(sizelimit); 759ee862730SMilan Broz LOOP_ATTR_RO(autoclear); 760e03c8dd1SKay Sievers LOOP_ATTR_RO(partscan); 7612e5ab5f3SMing Lei LOOP_ATTR_RO(dio); 762ee862730SMilan Broz 763ee862730SMilan Broz static struct attribute *loop_attrs[] = { 764ee862730SMilan Broz &loop_attr_backing_file.attr, 765ee862730SMilan Broz &loop_attr_offset.attr, 766ee862730SMilan Broz &loop_attr_sizelimit.attr, 767ee862730SMilan Broz &loop_attr_autoclear.attr, 768e03c8dd1SKay Sievers &loop_attr_partscan.attr, 7692e5ab5f3SMing Lei &loop_attr_dio.attr, 770ee862730SMilan Broz NULL, 771ee862730SMilan Broz }; 772ee862730SMilan Broz 773ee862730SMilan Broz static struct attribute_group loop_attribute_group = { 774ee862730SMilan Broz .name = "loop", 775ee862730SMilan Broz .attrs= loop_attrs, 776ee862730SMilan Broz }; 777ee862730SMilan Broz 778d3349b6bSTetsuo Handa static void loop_sysfs_init(struct loop_device *lo) 779ee862730SMilan Broz { 780d3349b6bSTetsuo Handa lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, 781ee862730SMilan Broz &loop_attribute_group); 782ee862730SMilan Broz } 783ee862730SMilan Broz 784ee862730SMilan Broz static void loop_sysfs_exit(struct loop_device *lo) 785ee862730SMilan Broz { 786d3349b6bSTetsuo Handa if (lo->sysfs_inited) 787ee862730SMilan Broz sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, 788ee862730SMilan Broz &loop_attribute_group); 789ee862730SMilan Broz } 790ee862730SMilan Broz 791b4912557SOGAWA Hirofumi static void loop_get_discard_config(struct loop_device *lo, 792b4912557SOGAWA Hirofumi u32 *granularity, u32 *max_discard_sectors) 793dfaa2ef6SLukas Czerner { 794dfaa2ef6SLukas Czerner struct file *file = lo->lo_backing_file; 795dfaa2ef6SLukas Czerner struct inode *inode = file->f_mapping->host; 79665bdd16fSChristoph Hellwig struct kstatfs sbuf; 797dfaa2ef6SLukas Czerner 798dfaa2ef6SLukas Czerner /* 799c52abf56SEvan Green * If the backing device is a block device, mirror its zeroing 800c52abf56SEvan Green * capability. Set the discard sectors to the block device's zeroing 801c52abf56SEvan Green * capabilities because loop discards result in blkdev_issue_zeroout(), 802c52abf56SEvan Green * not blkdev_issue_discard(). This maintains consistent behavior with 803c52abf56SEvan Green * file-backed loop devices: discarded regions read back as zero. 804c52abf56SEvan Green */ 80547e96246SChristoph Hellwig if (S_ISBLK(inode->i_mode)) { 8068d3fd059SJohn Garry struct block_device *bdev = I_BDEV(inode); 807c52abf56SEvan Green 808b4912557SOGAWA Hirofumi *max_discard_sectors = bdev_write_zeroes_sectors(bdev); 809b4912557SOGAWA Hirofumi *granularity = bdev_discard_granularity(bdev); 810c52abf56SEvan Green 811c52abf56SEvan Green /* 812dfaa2ef6SLukas Czerner * We use punch hole to reclaim the free space used by the 81347e96246SChristoph Hellwig * image a.k.a. discard. 814dfaa2ef6SLukas Czerner */ 81565bdd16fSChristoph Hellwig } else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) { 816b4912557SOGAWA Hirofumi *max_discard_sectors = UINT_MAX >> 9; 817b4912557SOGAWA Hirofumi *granularity = sbuf.f_bsize; 818c52abf56SEvan Green } 819bcb21c8cSMing Lei } 820dfaa2ef6SLukas Czerner 82187579e9bSDan Schatzberg struct loop_worker { 82287579e9bSDan Schatzberg struct rb_node rb_node; 82387579e9bSDan Schatzberg struct work_struct work; 82487579e9bSDan Schatzberg struct list_head cmd_list; 82587579e9bSDan Schatzberg struct list_head idle_list; 82687579e9bSDan Schatzberg struct loop_device *lo; 827c74d40e8SDan Schatzberg struct cgroup_subsys_state *blkcg_css; 82887579e9bSDan Schatzberg unsigned long last_ran_at; 82987579e9bSDan Schatzberg }; 830e03a3d7aSMing Lei 83187579e9bSDan Schatzberg static void loop_workfn(struct work_struct *work); 832b2ee7d46SNeilBrown 83387579e9bSDan Schatzberg #ifdef CONFIG_BLK_CGROUP 83487579e9bSDan Schatzberg static inline int queue_on_root_worker(struct cgroup_subsys_state *css) 835e03a3d7aSMing Lei { 83687579e9bSDan Schatzberg return !css || css == blkcg_root_css; 83787579e9bSDan Schatzberg } 83887579e9bSDan Schatzberg #else 83987579e9bSDan Schatzberg static inline int queue_on_root_worker(struct cgroup_subsys_state *css) 84087579e9bSDan Schatzberg { 84187579e9bSDan Schatzberg return !css; 84287579e9bSDan Schatzberg } 84387579e9bSDan Schatzberg #endif 84487579e9bSDan Schatzberg 84587579e9bSDan Schatzberg static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd) 84687579e9bSDan Schatzberg { 847413ec805SColin Ian King struct rb_node **node, *parent = NULL; 84887579e9bSDan Schatzberg struct loop_worker *cur_worker, *worker = NULL; 84987579e9bSDan Schatzberg struct work_struct *work; 85087579e9bSDan Schatzberg struct list_head *cmd_list; 85187579e9bSDan Schatzberg 85287579e9bSDan Schatzberg spin_lock_irq(&lo->lo_work_lock); 85387579e9bSDan Schatzberg 854c74d40e8SDan Schatzberg if (queue_on_root_worker(cmd->blkcg_css)) 85587579e9bSDan Schatzberg goto queue_work; 85687579e9bSDan Schatzberg 85787579e9bSDan Schatzberg node = &lo->worker_tree.rb_node; 85887579e9bSDan Schatzberg 85987579e9bSDan Schatzberg while (*node) { 86087579e9bSDan Schatzberg parent = *node; 86187579e9bSDan Schatzberg cur_worker = container_of(*node, struct loop_worker, rb_node); 862c74d40e8SDan Schatzberg if (cur_worker->blkcg_css == cmd->blkcg_css) { 86387579e9bSDan Schatzberg worker = cur_worker; 86487579e9bSDan Schatzberg break; 865c74d40e8SDan Schatzberg } else if ((long)cur_worker->blkcg_css < (long)cmd->blkcg_css) { 86687579e9bSDan Schatzberg node = &(*node)->rb_left; 86787579e9bSDan Schatzberg } else { 86887579e9bSDan Schatzberg node = &(*node)->rb_right; 86987579e9bSDan Schatzberg } 87087579e9bSDan Schatzberg } 87187579e9bSDan Schatzberg if (worker) 87287579e9bSDan Schatzberg goto queue_work; 87387579e9bSDan Schatzberg 87487579e9bSDan Schatzberg worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN); 87587579e9bSDan Schatzberg /* 87687579e9bSDan Schatzberg * In the event we cannot allocate a worker, just queue on the 877c74d40e8SDan Schatzberg * rootcg worker and issue the I/O as the rootcg 87887579e9bSDan Schatzberg */ 879c74d40e8SDan Schatzberg if (!worker) { 880c74d40e8SDan Schatzberg cmd->blkcg_css = NULL; 881c74d40e8SDan Schatzberg if (cmd->memcg_css) 882c74d40e8SDan Schatzberg css_put(cmd->memcg_css); 883c74d40e8SDan Schatzberg cmd->memcg_css = NULL; 88487579e9bSDan Schatzberg goto queue_work; 885c74d40e8SDan Schatzberg } 88687579e9bSDan Schatzberg 887c74d40e8SDan Schatzberg worker->blkcg_css = cmd->blkcg_css; 888c74d40e8SDan Schatzberg css_get(worker->blkcg_css); 88987579e9bSDan Schatzberg INIT_WORK(&worker->work, loop_workfn); 89087579e9bSDan Schatzberg INIT_LIST_HEAD(&worker->cmd_list); 89187579e9bSDan Schatzberg INIT_LIST_HEAD(&worker->idle_list); 89287579e9bSDan Schatzberg worker->lo = lo; 89387579e9bSDan Schatzberg rb_link_node(&worker->rb_node, parent, node); 89487579e9bSDan Schatzberg rb_insert_color(&worker->rb_node, &lo->worker_tree); 89587579e9bSDan Schatzberg queue_work: 89687579e9bSDan Schatzberg if (worker) { 89787579e9bSDan Schatzberg /* 89887579e9bSDan Schatzberg * We need to remove from the idle list here while 89987579e9bSDan Schatzberg * holding the lock so that the idle timer doesn't 90087579e9bSDan Schatzberg * free the worker 90187579e9bSDan Schatzberg */ 90287579e9bSDan Schatzberg if (!list_empty(&worker->idle_list)) 90387579e9bSDan Schatzberg list_del_init(&worker->idle_list); 90487579e9bSDan Schatzberg work = &worker->work; 90587579e9bSDan Schatzberg cmd_list = &worker->cmd_list; 90687579e9bSDan Schatzberg } else { 90787579e9bSDan Schatzberg work = &lo->rootcg_work; 90887579e9bSDan Schatzberg cmd_list = &lo->rootcg_cmd_list; 90987579e9bSDan Schatzberg } 91087579e9bSDan Schatzberg list_add_tail(&cmd->list_entry, cmd_list); 9113bee991fSZhaoyang Huang queue_work(lo->workqueue, work); 91202b3c61aSZhaoyang Huang spin_unlock_irq(&lo->lo_work_lock); 913e03a3d7aSMing Lei } 914e03a3d7aSMing Lei 9152cf429b5SChristoph Hellwig static void loop_set_timer(struct loop_device *lo) 9162cf429b5SChristoph Hellwig { 9172cf429b5SChristoph Hellwig timer_reduce(&lo->timer, jiffies + LOOP_IDLE_WORKER_TIMEOUT); 9182cf429b5SChristoph Hellwig } 9192cf429b5SChristoph Hellwig 9202cf429b5SChristoph Hellwig static void loop_free_idle_workers(struct loop_device *lo, bool delete_all) 9212cf429b5SChristoph Hellwig { 9222cf429b5SChristoph Hellwig struct loop_worker *pos, *worker; 9232cf429b5SChristoph Hellwig 9242cf429b5SChristoph Hellwig spin_lock_irq(&lo->lo_work_lock); 9252cf429b5SChristoph Hellwig list_for_each_entry_safe(worker, pos, &lo->idle_worker_list, 9262cf429b5SChristoph Hellwig idle_list) { 9272cf429b5SChristoph Hellwig if (!delete_all && 9282cf429b5SChristoph Hellwig time_is_after_jiffies(worker->last_ran_at + 9292cf429b5SChristoph Hellwig LOOP_IDLE_WORKER_TIMEOUT)) 9302cf429b5SChristoph Hellwig break; 9312cf429b5SChristoph Hellwig list_del(&worker->idle_list); 9322cf429b5SChristoph Hellwig rb_erase(&worker->rb_node, &lo->worker_tree); 9332cf429b5SChristoph Hellwig css_put(worker->blkcg_css); 9342cf429b5SChristoph Hellwig kfree(worker); 9352cf429b5SChristoph Hellwig } 9362cf429b5SChristoph Hellwig if (!list_empty(&lo->idle_worker_list)) 9372cf429b5SChristoph Hellwig loop_set_timer(lo); 9382cf429b5SChristoph Hellwig spin_unlock_irq(&lo->lo_work_lock); 9392cf429b5SChristoph Hellwig } 9402cf429b5SChristoph Hellwig 9412cf429b5SChristoph Hellwig static void loop_free_idle_workers_timer(struct timer_list *timer) 9422cf429b5SChristoph Hellwig { 9432cf429b5SChristoph Hellwig struct loop_device *lo = container_of(timer, struct loop_device, timer); 9442cf429b5SChristoph Hellwig 9452cf429b5SChristoph Hellwig return loop_free_idle_workers(lo, false); 9462cf429b5SChristoph Hellwig } 9472cf429b5SChristoph Hellwig 94862ab466cSMartijn Coenen /** 94962ab466cSMartijn Coenen * loop_set_status_from_info - configure device from loop_info 95062ab466cSMartijn Coenen * @lo: struct loop_device to configure 95162ab466cSMartijn Coenen * @info: struct loop_info64 to configure the device with 95262ab466cSMartijn Coenen * 95362ab466cSMartijn Coenen * Configures the loop device parameters according to the passed 95462ab466cSMartijn Coenen * in loop_info64 configuration. 95562ab466cSMartijn Coenen */ 95662ab466cSMartijn Coenen static int 95762ab466cSMartijn Coenen loop_set_status_from_info(struct loop_device *lo, 95862ab466cSMartijn Coenen const struct loop_info64 *info) 95962ab466cSMartijn Coenen { 96062ab466cSMartijn Coenen if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) 96162ab466cSMartijn Coenen return -EINVAL; 96262ab466cSMartijn Coenen 96347e96246SChristoph Hellwig switch (info->lo_encrypt_type) { 96447e96246SChristoph Hellwig case LO_CRYPT_NONE: 96547e96246SChristoph Hellwig break; 96647e96246SChristoph Hellwig case LO_CRYPT_XOR: 96747e96246SChristoph Hellwig pr_warn("support for the xor transformation has been removed.\n"); 96862ab466cSMartijn Coenen return -EINVAL; 96947e96246SChristoph Hellwig case LO_CRYPT_CRYPTOAPI: 97047e96246SChristoph Hellwig pr_warn("support for cryptoloop has been removed. Use dm-crypt instead.\n"); 97162ab466cSMartijn Coenen return -EINVAL; 97247e96246SChristoph Hellwig default: 97347e96246SChristoph Hellwig return -EINVAL; 97447e96246SChristoph Hellwig } 97562ab466cSMartijn Coenen 9769f6ad5d5SZhong Jinghua /* Avoid assigning overflow values */ 9779f6ad5d5SZhong Jinghua if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX) 9789f6ad5d5SZhong Jinghua return -EOVERFLOW; 9799f6ad5d5SZhong Jinghua 98062ab466cSMartijn Coenen lo->lo_offset = info->lo_offset; 98162ab466cSMartijn Coenen lo->lo_sizelimit = info->lo_sizelimit; 982c490a0b5SSiddh Raman Pant 98362ab466cSMartijn Coenen memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); 98462ab466cSMartijn Coenen lo->lo_file_name[LO_NAME_SIZE-1] = 0; 98562ab466cSMartijn Coenen return 0; 98662ab466cSMartijn Coenen } 98762ab466cSMartijn Coenen 988f4774e92SChristoph Hellwig static unsigned int loop_default_blocksize(struct loop_device *lo) 989473516b3SChristoph Hellwig { 990f4774e92SChristoph Hellwig /* In case of direct I/O, match underlying minimum I/O size */ 991f4774e92SChristoph Hellwig if (lo->lo_flags & LO_FLAGS_DIRECT_IO) 992f4774e92SChristoph Hellwig return lo->lo_min_dio_size; 993a17ece76SChristoph Hellwig return SECTOR_SIZE; 994a17ece76SChristoph Hellwig } 995a17ece76SChristoph Hellwig 996b38c8be2SChristoph Hellwig static void loop_update_limits(struct loop_device *lo, struct queue_limits *lim, 997b38c8be2SChristoph Hellwig unsigned int bsize) 998473516b3SChristoph Hellwig { 999a17ece76SChristoph Hellwig struct file *file = lo->lo_backing_file; 1000a17ece76SChristoph Hellwig struct inode *inode = file->f_mapping->host; 10014ce37fe0SChristoph Hellwig struct block_device *backing_bdev = NULL; 1002b4912557SOGAWA Hirofumi u32 granularity = 0, max_discard_sectors = 0; 1003473516b3SChristoph Hellwig 10044ce37fe0SChristoph Hellwig if (S_ISBLK(inode->i_mode)) 10054ce37fe0SChristoph Hellwig backing_bdev = I_BDEV(inode); 10064ce37fe0SChristoph Hellwig else if (inode->i_sb->s_bdev) 10074ce37fe0SChristoph Hellwig backing_bdev = inode->i_sb->s_bdev; 10084ce37fe0SChristoph Hellwig 1009a17ece76SChristoph Hellwig if (!bsize) 1010f4774e92SChristoph Hellwig bsize = loop_default_blocksize(lo); 1011a17ece76SChristoph Hellwig 1012b4912557SOGAWA Hirofumi loop_get_discard_config(lo, &granularity, &max_discard_sectors); 1013b4912557SOGAWA Hirofumi 1014b38c8be2SChristoph Hellwig lim->logical_block_size = bsize; 1015b38c8be2SChristoph Hellwig lim->physical_block_size = bsize; 1016b38c8be2SChristoph Hellwig lim->io_min = bsize; 1017b38c8be2SChristoph Hellwig lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL); 10181122c0c1SChristoph Hellwig if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY)) 1019b38c8be2SChristoph Hellwig lim->features |= BLK_FEAT_WRITE_CACHE; 1020bd4a633bSChristoph Hellwig if (backing_bdev && !bdev_nonrot(backing_bdev)) 1021b38c8be2SChristoph Hellwig lim->features |= BLK_FEAT_ROTATIONAL; 1022b38c8be2SChristoph Hellwig lim->max_hw_discard_sectors = max_discard_sectors; 1023b38c8be2SChristoph Hellwig lim->max_write_zeroes_sectors = max_discard_sectors; 1024b4912557SOGAWA Hirofumi if (max_discard_sectors) 1025b38c8be2SChristoph Hellwig lim->discard_granularity = granularity; 1026b4912557SOGAWA Hirofumi else 1027b38c8be2SChristoph Hellwig lim->discard_granularity = 0; 1028473516b3SChristoph Hellwig } 1029473516b3SChristoph Hellwig 103005bdb996SChristoph Hellwig static int loop_configure(struct loop_device *lo, blk_mode_t mode, 10313448914eSMartijn Coenen struct block_device *bdev, 10323448914eSMartijn Coenen const struct loop_config *config) 10331da177e4SLinus Torvalds { 10343ce6e1f6STetsuo Handa struct file *file = fget(config->fd); 1035b38c8be2SChristoph Hellwig struct queue_limits lim; 10361da177e4SLinus Torvalds int error; 10371da177e4SLinus Torvalds loff_t size; 103885b0a54aSJan Kara bool partscan; 10393ce6e1f6STetsuo Handa bool is_loop; 10403ce6e1f6STetsuo Handa 10413ce6e1f6STetsuo Handa if (!file) 10423ce6e1f6STetsuo Handa return -EBADF; 10433ce6e1f6STetsuo Handa is_loop = is_loop_device(file); 10441da177e4SLinus Torvalds 10451da177e4SLinus Torvalds /* This is safe, since we have a reference from open(). */ 10461da177e4SLinus Torvalds __module_get(THIS_MODULE); 10471da177e4SLinus Torvalds 104833ec3e53SJan Kara /* 104933ec3e53SJan Kara * If we don't hold exclusive handle for the device, upgrade to it 105033ec3e53SJan Kara * here to avoid changing device under exclusive owner. 105133ec3e53SJan Kara */ 105205bdb996SChristoph Hellwig if (!(mode & BLK_OPEN_EXCL)) { 10530718afd4SChristoph Hellwig error = bd_prepare_to_claim(bdev, loop_configure, NULL); 1054ecbe6bc0SChristoph Hellwig if (error) 1055757ecf40SJan Kara goto out_putf; 105633ec3e53SJan Kara } 105733ec3e53SJan Kara 10583ce6e1f6STetsuo Handa error = loop_global_lock_killable(lo, is_loop); 105933ec3e53SJan Kara if (error) 106033ec3e53SJan Kara goto out_bdev; 1061757ecf40SJan Kara 10621da177e4SLinus Torvalds error = -EBUSY; 10631da177e4SLinus Torvalds if (lo->lo_state != Lo_unbound) 1064757ecf40SJan Kara goto out_unlock; 10651da177e4SLinus Torvalds 1066d2ac838eSTheodore Ts'o error = loop_validate_file(file, bdev); 1067d2ac838eSTheodore Ts'o if (error) 1068757ecf40SJan Kara goto out_unlock; 10691da177e4SLinus Torvalds 10703448914eSMartijn Coenen if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) { 10713448914eSMartijn Coenen error = -EINVAL; 10723448914eSMartijn Coenen goto out_unlock; 10733448914eSMartijn Coenen } 10743448914eSMartijn Coenen 10753448914eSMartijn Coenen error = loop_set_status_from_info(lo, &config->info); 10763448914eSMartijn Coenen if (error) 10773448914eSMartijn Coenen goto out_unlock; 1078ae074d07SChristoph Hellwig lo->lo_flags = config->info.lo_flags; 10793448914eSMartijn Coenen 108005bdb996SChristoph Hellwig if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) || 1081283e7e5dSAl Viro !file->f_op->write_iter) 10823448914eSMartijn Coenen lo->lo_flags |= LO_FLAGS_READ_ONLY; 1083083a6a50SMartijn Coenen 1084d292dc80SChristoph Hellwig if (!lo->workqueue) { 108587579e9bSDan Schatzberg lo->workqueue = alloc_workqueue("loop%d", 108687579e9bSDan Schatzberg WQ_UNBOUND | WQ_FREEZABLE, 1087d292dc80SChristoph Hellwig 0, lo->lo_number); 108887579e9bSDan Schatzberg if (!lo->workqueue) { 108987579e9bSDan Schatzberg error = -ENOMEM; 1090757ecf40SJan Kara goto out_unlock; 109187579e9bSDan Schatzberg } 1092d292dc80SChristoph Hellwig } 10931da177e4SLinus Torvalds 1094bb430b69SAlyssa Ross /* suppress uevents while reconfiguring the device */ 1095bb430b69SAlyssa Ross dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); 1096bb430b69SAlyssa Ross 1097ab6860f6SChristoph Hellwig disk_force_media_change(lo->lo_disk); 10987a2f0ce1SChristoph Hellwig set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0); 10991da177e4SLinus Torvalds 11001da177e4SLinus Torvalds lo->lo_device = bdev; 1101d2781648SChristoph Hellwig loop_assign_backing_file(lo, file); 11021da177e4SLinus Torvalds 1103b38c8be2SChristoph Hellwig lim = queue_limits_start_update(lo->lo_queue); 1104b38c8be2SChristoph Hellwig loop_update_limits(lo, &lim, config->block_size); 1105b03732a9SChristoph Hellwig /* No need to freeze the queue as the device isn't bound yet. */ 1106b38c8be2SChristoph Hellwig error = queue_limits_commit_update(lo->lo_queue, &lim); 11079423c653SJohn Garry if (error) 1108473516b3SChristoph Hellwig goto out_unlock; 110985560117SMartijn Coenen 111086947bdcSMing Lei /* 111186947bdcSMing Lei * We might switch to direct I/O mode for the loop device, write back 111286947bdcSMing Lei * all dirty data the page cache now that so that the individual I/O 111386947bdcSMing Lei * operations don't have to do that. 111486947bdcSMing Lei */ 111586947bdcSMing Lei vfs_fsync(file, 0); 111686947bdcSMing Lei 11172e5ab5f3SMing Lei loop_update_dio(lo); 1118ee862730SMilan Broz loop_sysfs_init(lo); 111979e5dc59SMartijn Coenen 112079e5dc59SMartijn Coenen size = get_loop_size(lo, file); 11215795b6f5SMartijn Coenen loop_set_size(lo, size); 11221da177e4SLinus Torvalds 11233ce6e1f6STetsuo Handa /* Order wrt reading lo_state in loop_validate_file(). */ 11243ce6e1f6STetsuo Handa wmb(); 11253ce6e1f6STetsuo Handa 11266c997918SSerge E. Hallyn lo->lo_state = Lo_bound; 1127e03c8dd1SKay Sievers if (part_shift) 1128e03c8dd1SKay Sievers lo->lo_flags |= LO_FLAGS_PARTSCAN; 112985b0a54aSJan Kara partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; 1130fe6a8fc5SLennart Poettering if (partscan) 1131b9684a71SChristoph Hellwig clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); 1132c1681bf8SAnatol Pomozov 1133bb430b69SAlyssa Ross dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); 1134e7bc0010SThomas Weißschuh kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); 1135bb430b69SAlyssa Ross 11363ce6e1f6STetsuo Handa loop_global_unlock(lo, is_loop); 113785b0a54aSJan Kara if (partscan) 11380384264eSChristoph Hellwig loop_reread_partitions(lo); 1139bb430b69SAlyssa Ross 114005bdb996SChristoph Hellwig if (!(mode & BLK_OPEN_EXCL)) 114137c3fc9aSChristoph Hellwig bd_abort_claiming(bdev, loop_configure); 1142498ef5c7SChristoph Hellwig 1143bb430b69SAlyssa Ross return 0; 11441da177e4SLinus Torvalds 1145757ecf40SJan Kara out_unlock: 11463ce6e1f6STetsuo Handa loop_global_unlock(lo, is_loop); 114733ec3e53SJan Kara out_bdev: 114805bdb996SChristoph Hellwig if (!(mode & BLK_OPEN_EXCL)) 114937c3fc9aSChristoph Hellwig bd_abort_claiming(bdev, loop_configure); 11501da177e4SLinus Torvalds out_putf: 11511da177e4SLinus Torvalds fput(file); 11521da177e4SLinus Torvalds /* This is safe: open() is still holding a reference. */ 11531da177e4SLinus Torvalds module_put(THIS_MODULE); 1154bb430b69SAlyssa Ross return error; 11551da177e4SLinus Torvalds } 11561da177e4SLinus Torvalds 115718048c1aSGulam Mohamed static void __loop_clr_fd(struct loop_device *lo) 11581da177e4SLinus Torvalds { 1159c9055b44SChristoph Hellwig struct queue_limits lim; 11606050fa4cSTetsuo Handa struct file *filp; 1161b4e3ca1aSAl Viro gfp_t gfp = lo->old_gfp_mask; 11621da177e4SLinus Torvalds 11631da177e4SLinus Torvalds spin_lock_irq(&lo->lo_lock); 11646050fa4cSTetsuo Handa filp = lo->lo_backing_file; 11651da177e4SLinus Torvalds lo->lo_backing_file = NULL; 116605eb0f25SKay Sievers spin_unlock_irq(&lo->lo_lock); 11671da177e4SLinus Torvalds 11681da177e4SLinus Torvalds lo->lo_device = NULL; 11691da177e4SLinus Torvalds lo->lo_offset = 0; 11701da177e4SLinus Torvalds lo->lo_sizelimit = 0; 11711da177e4SLinus Torvalds memset(lo->lo_file_name, 0, LO_NAME_SIZE); 1172c9055b44SChristoph Hellwig 1173b03732a9SChristoph Hellwig /* 1174b03732a9SChristoph Hellwig * Reset the block size to the default. 1175b03732a9SChristoph Hellwig * 1176b03732a9SChristoph Hellwig * No queue freezing needed because this is called from the final 1177b03732a9SChristoph Hellwig * ->release call only, so there can't be any outstanding I/O. 1178b03732a9SChristoph Hellwig */ 1179c9055b44SChristoph Hellwig lim = queue_limits_start_update(lo->lo_queue); 1180c9055b44SChristoph Hellwig lim.logical_block_size = SECTOR_SIZE; 1181c9055b44SChristoph Hellwig lim.physical_block_size = SECTOR_SIZE; 1182c9055b44SChristoph Hellwig lim.io_min = SECTOR_SIZE; 1183c9055b44SChristoph Hellwig queue_limits_commit_update(lo->lo_queue, &lim); 1184c9055b44SChristoph Hellwig 1185e515be8fSXie Yongji invalidate_disk(lo->lo_disk); 118651a0bb0cSMilan Broz loop_sysfs_exit(lo); 1187c3473c63SDavid Zeuthen /* let user-space know about this change */ 118819f553dbSXie Yongji kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); 11891da177e4SLinus Torvalds mapping_set_gfp_mask(filp->f_mapping, gfp); 1190bf23747eSTetsuo Handa /* This is safe: open() is still holding a reference. */ 1191bf23747eSTetsuo Handa module_put(THIS_MODULE); 1192f8933667SMing Lei 1193ab6860f6SChristoph Hellwig disk_force_media_change(lo->lo_disk); 11946050fa4cSTetsuo Handa 11956050fa4cSTetsuo Handa if (lo->lo_flags & LO_FLAGS_PARTSCAN) { 11966050fa4cSTetsuo Handa int err; 11976050fa4cSTetsuo Handa 1198bf23747eSTetsuo Handa /* 1199bf23747eSTetsuo Handa * open_mutex has been held already in release path, so don't 1200bf23747eSTetsuo Handa * acquire it if this function is called in such case. 1201bf23747eSTetsuo Handa * 1202bf23747eSTetsuo Handa * If the reread partition isn't from release path, lo_refcnt 1203bf23747eSTetsuo Handa * must be at least one and it can only become zero when the 1204bf23747eSTetsuo Handa * current holder is released. 1205bf23747eSTetsuo Handa */ 12060384264eSChristoph Hellwig err = bdev_disk_changed(lo->lo_disk, false); 120740853d6fSDongli Zhang if (err) 1208d57f3374SJan Kara pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", 12096050fa4cSTetsuo Handa __func__, lo->lo_number, err); 1210d57f3374SJan Kara /* Device is gone, no point in returning error */ 1211d57f3374SJan Kara } 1212758a58d0SDongli Zhang 1213bf23747eSTetsuo Handa /* 1214bf23747eSTetsuo Handa * lo->lo_state is set to Lo_unbound here after above partscan has 1215bf23747eSTetsuo Handa * finished. There cannot be anybody else entering __loop_clr_fd() as 1216bf23747eSTetsuo Handa * Lo_rundown state protects us from all the other places trying to 1217bf23747eSTetsuo Handa * change the 'lo' device. 1218bf23747eSTetsuo Handa */ 1219758a58d0SDongli Zhang lo->lo_flags = 0; 1220758a58d0SDongli Zhang if (!part_shift) 1221b9684a71SChristoph Hellwig set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); 12226050fa4cSTetsuo Handa mutex_lock(&lo->lo_mutex); 1223758a58d0SDongli Zhang lo->lo_state = Lo_unbound; 12246cc8e743SPavel Tatashin mutex_unlock(&lo->lo_mutex); 1225758a58d0SDongli Zhang 1226bf23747eSTetsuo Handa /* 1227bf23747eSTetsuo Handa * Need not hold lo_mutex to fput backing file. Calling fput holding 1228bf23747eSTetsuo Handa * lo_mutex triggers a circular lock dependency possibility warning as 1229bf23747eSTetsuo Handa * fput can take open_mutex which is usually taken before lo_mutex. 1230bf23747eSTetsuo Handa */ 1231bf23747eSTetsuo Handa fput(filp); 12321da177e4SLinus Torvalds } 12331da177e4SLinus Torvalds 1234a2505b79SJan Kara static int loop_clr_fd(struct loop_device *lo) 1235a2505b79SJan Kara { 12367ccd0791SJan Kara int err; 12377ccd0791SJan Kara 1238158eaebaSTetsuo Handa /* 1239158eaebaSTetsuo Handa * Since lo_ioctl() is called without locks held, it is possible that 1240158eaebaSTetsuo Handa * loop_configure()/loop_change_fd() and loop_clr_fd() run in parallel. 1241158eaebaSTetsuo Handa * 1242158eaebaSTetsuo Handa * Therefore, use global lock when setting Lo_rundown state in order to 1243158eaebaSTetsuo Handa * make sure that loop_validate_file() will fail if the "struct file" 1244158eaebaSTetsuo Handa * which loop_configure()/loop_change_fd() found via fget() was this 1245158eaebaSTetsuo Handa * loop device. 1246158eaebaSTetsuo Handa */ 1247158eaebaSTetsuo Handa err = loop_global_lock_killable(lo, true); 12487ccd0791SJan Kara if (err) 12497ccd0791SJan Kara return err; 12507ccd0791SJan Kara if (lo->lo_state != Lo_bound) { 1251158eaebaSTetsuo Handa loop_global_unlock(lo, true); 1252a2505b79SJan Kara return -ENXIO; 12537ccd0791SJan Kara } 1254a2505b79SJan Kara /* 125518048c1aSGulam Mohamed * Mark the device for removing the backing device on last close. 125618048c1aSGulam Mohamed * If we are the only opener, also switch the state to roundown here to 125718048c1aSGulam Mohamed * prevent new openers from coming in. 1258a2505b79SJan Kara */ 125918048c1aSGulam Mohamed 1260a2505b79SJan Kara lo->lo_flags |= LO_FLAGS_AUTOCLEAR; 126118048c1aSGulam Mohamed if (disk_openers(lo->lo_disk) == 1) 1262a2505b79SJan Kara lo->lo_state = Lo_rundown; 1263158eaebaSTetsuo Handa loop_global_unlock(lo, true); 1264a2505b79SJan Kara 12656050fa4cSTetsuo Handa return 0; 1266a2505b79SJan Kara } 1267a2505b79SJan Kara 12681da177e4SLinus Torvalds static int 12691da177e4SLinus Torvalds loop_set_status(struct loop_device *lo, const struct loop_info64 *info) 12701da177e4SLinus Torvalds { 12711da177e4SLinus Torvalds int err; 127285b0a54aSJan Kara bool partscan = false; 12730c3796c2SMartijn Coenen bool size_changed = false; 12741e1a9cecSChristoph Hellwig unsigned int memflags; 12751da177e4SLinus Torvalds 12766cc8e743SPavel Tatashin err = mutex_lock_killable(&lo->lo_mutex); 1277550df5fdSJan Kara if (err) 1278550df5fdSJan Kara return err; 1279550df5fdSJan Kara if (lo->lo_state != Lo_bound) { 1280550df5fdSJan Kara err = -ENXIO; 1281550df5fdSJan Kara goto out_unlock; 1282550df5fdSJan Kara } 12831da177e4SLinus Torvalds 12845db470e2SJaegeuk Kim if (lo->lo_offset != info->lo_offset || 12855db470e2SJaegeuk Kim lo->lo_sizelimit != info->lo_sizelimit) { 12860c3796c2SMartijn Coenen size_changed = true; 12875db470e2SJaegeuk Kim sync_blockdev(lo->lo_device); 1288f4bd34b1SZheng Bin invalidate_bdev(lo->lo_device); 12895db470e2SJaegeuk Kim } 12905db470e2SJaegeuk Kim 12914155adb0SChristoph Hellwig /* I/O needs to be drained before changing lo_offset or lo_sizelimit */ 12921e1a9cecSChristoph Hellwig memflags = blk_mq_freeze_queue(lo->lo_queue); 1293ecdd0959SMing Lei 12940c3796c2SMartijn Coenen err = loop_set_status_from_info(lo, info); 12950c3796c2SMartijn Coenen if (err) 1296550df5fdSJan Kara goto out_unfreeze; 12970c3796c2SMartijn Coenen 1298ae074d07SChristoph Hellwig partscan = !(lo->lo_flags & LO_FLAGS_PARTSCAN) && 1299ae074d07SChristoph Hellwig (info->lo_flags & LO_FLAGS_PARTSCAN); 1300ae074d07SChristoph Hellwig 13015aa21b04SChristoph Hellwig lo->lo_flags &= ~LOOP_SET_STATUS_CLEARABLE_FLAGS; 1302ae074d07SChristoph Hellwig lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS); 1303faf1d254SMartijn Coenen 1304b0bd158dSMartijn Coenen if (size_changed) { 1305b0bd158dSMartijn Coenen loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit, 1306b0bd158dSMartijn Coenen lo->lo_backing_file); 1307b0bd158dSMartijn Coenen loop_set_size(lo, new_size); 1308b040ad9cSArnd Bergmann } 1309541c742aSGuo Chao 13104155adb0SChristoph Hellwig /* update the direct I/O flag if lo_offset changed */ 13113a693110SChristoph Hellwig loop_update_dio(lo); 13122e5ab5f3SMing Lei 1313550df5fdSJan Kara out_unfreeze: 13141e1a9cecSChristoph Hellwig blk_mq_unfreeze_queue(lo->lo_queue, memflags); 1315ae074d07SChristoph Hellwig if (partscan) 1316b9684a71SChristoph Hellwig clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); 1317550df5fdSJan Kara out_unlock: 13186cc8e743SPavel Tatashin mutex_unlock(&lo->lo_mutex); 131985b0a54aSJan Kara if (partscan) 13200384264eSChristoph Hellwig loop_reread_partitions(lo); 1321e02898b4SOmar Sandoval 1322ecdd0959SMing Lei return err; 13231da177e4SLinus Torvalds } 13241da177e4SLinus Torvalds 13251da177e4SLinus Torvalds static int 13261da177e4SLinus Torvalds loop_get_status(struct loop_device *lo, struct loop_info64 *info) 13271da177e4SLinus Torvalds { 1328b1ab5fa3STetsuo Handa struct path path; 13291da177e4SLinus Torvalds struct kstat stat; 13302d1d4c1eSOmar Sandoval int ret; 13311da177e4SLinus Torvalds 13326cc8e743SPavel Tatashin ret = mutex_lock_killable(&lo->lo_mutex); 13334a5ce9baSJan Kara if (ret) 13344a5ce9baSJan Kara return ret; 13352d1d4c1eSOmar Sandoval if (lo->lo_state != Lo_bound) { 13366cc8e743SPavel Tatashin mutex_unlock(&lo->lo_mutex); 13371da177e4SLinus Torvalds return -ENXIO; 13382d1d4c1eSOmar Sandoval } 13392d1d4c1eSOmar Sandoval 13401da177e4SLinus Torvalds memset(info, 0, sizeof(*info)); 13411da177e4SLinus Torvalds info->lo_number = lo->lo_number; 13421da177e4SLinus Torvalds info->lo_offset = lo->lo_offset; 13431da177e4SLinus Torvalds info->lo_sizelimit = lo->lo_sizelimit; 13441da177e4SLinus Torvalds info->lo_flags = lo->lo_flags; 13451da177e4SLinus Torvalds memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE); 13462d1d4c1eSOmar Sandoval 13476cc8e743SPavel Tatashin /* Drop lo_mutex while we call into the filesystem. */ 1348b1ab5fa3STetsuo Handa path = lo->lo_backing_file->f_path; 1349b1ab5fa3STetsuo Handa path_get(&path); 13506cc8e743SPavel Tatashin mutex_unlock(&lo->lo_mutex); 1351b1ab5fa3STetsuo Handa ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT); 13522d1d4c1eSOmar Sandoval if (!ret) { 13532d1d4c1eSOmar Sandoval info->lo_device = huge_encode_dev(stat.dev); 13542d1d4c1eSOmar Sandoval info->lo_inode = stat.ino; 13552d1d4c1eSOmar Sandoval info->lo_rdevice = huge_encode_dev(stat.rdev); 13562d1d4c1eSOmar Sandoval } 1357b1ab5fa3STetsuo Handa path_put(&path); 13582d1d4c1eSOmar Sandoval return ret; 13591da177e4SLinus Torvalds } 13601da177e4SLinus Torvalds 13611da177e4SLinus Torvalds static void 13621da177e4SLinus Torvalds loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64) 13631da177e4SLinus Torvalds { 13641da177e4SLinus Torvalds memset(info64, 0, sizeof(*info64)); 13651da177e4SLinus Torvalds info64->lo_number = info->lo_number; 13661da177e4SLinus Torvalds info64->lo_device = info->lo_device; 13671da177e4SLinus Torvalds info64->lo_inode = info->lo_inode; 13681da177e4SLinus Torvalds info64->lo_rdevice = info->lo_rdevice; 13691da177e4SLinus Torvalds info64->lo_offset = info->lo_offset; 13701da177e4SLinus Torvalds info64->lo_sizelimit = 0; 13711da177e4SLinus Torvalds info64->lo_flags = info->lo_flags; 13721da177e4SLinus Torvalds memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE); 13731da177e4SLinus Torvalds } 13741da177e4SLinus Torvalds 13751da177e4SLinus Torvalds static int 13761da177e4SLinus Torvalds loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info) 13771da177e4SLinus Torvalds { 13781da177e4SLinus Torvalds memset(info, 0, sizeof(*info)); 13791da177e4SLinus Torvalds info->lo_number = info64->lo_number; 13801da177e4SLinus Torvalds info->lo_device = info64->lo_device; 13811da177e4SLinus Torvalds info->lo_inode = info64->lo_inode; 13821da177e4SLinus Torvalds info->lo_rdevice = info64->lo_rdevice; 13831da177e4SLinus Torvalds info->lo_offset = info64->lo_offset; 13841da177e4SLinus Torvalds info->lo_flags = info64->lo_flags; 13851da177e4SLinus Torvalds memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE); 13861da177e4SLinus Torvalds 13871da177e4SLinus Torvalds /* error in case values were truncated */ 13881da177e4SLinus Torvalds if (info->lo_device != info64->lo_device || 13891da177e4SLinus Torvalds info->lo_rdevice != info64->lo_rdevice || 13901da177e4SLinus Torvalds info->lo_inode != info64->lo_inode || 13911da177e4SLinus Torvalds info->lo_offset != info64->lo_offset) 13921da177e4SLinus Torvalds return -EOVERFLOW; 13931da177e4SLinus Torvalds 13941da177e4SLinus Torvalds return 0; 13951da177e4SLinus Torvalds } 13961da177e4SLinus Torvalds 13971da177e4SLinus Torvalds static int 13981da177e4SLinus Torvalds loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg) 13991da177e4SLinus Torvalds { 14001da177e4SLinus Torvalds struct loop_info info; 14011da177e4SLinus Torvalds struct loop_info64 info64; 14021da177e4SLinus Torvalds 14031da177e4SLinus Torvalds if (copy_from_user(&info, arg, sizeof (struct loop_info))) 14041da177e4SLinus Torvalds return -EFAULT; 14051da177e4SLinus Torvalds loop_info64_from_old(&info, &info64); 14061da177e4SLinus Torvalds return loop_set_status(lo, &info64); 14071da177e4SLinus Torvalds } 14081da177e4SLinus Torvalds 14091da177e4SLinus Torvalds static int 14101da177e4SLinus Torvalds loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg) 14111da177e4SLinus Torvalds { 14121da177e4SLinus Torvalds struct loop_info64 info64; 14131da177e4SLinus Torvalds 14141da177e4SLinus Torvalds if (copy_from_user(&info64, arg, sizeof (struct loop_info64))) 14151da177e4SLinus Torvalds return -EFAULT; 14161da177e4SLinus Torvalds return loop_set_status(lo, &info64); 14171da177e4SLinus Torvalds } 14181da177e4SLinus Torvalds 14191da177e4SLinus Torvalds static int 14201da177e4SLinus Torvalds loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { 14211da177e4SLinus Torvalds struct loop_info info; 14221da177e4SLinus Torvalds struct loop_info64 info64; 1423bdac616dSOmar Sandoval int err; 14241da177e4SLinus Torvalds 14254a5ce9baSJan Kara if (!arg) 1426bdac616dSOmar Sandoval return -EINVAL; 14271da177e4SLinus Torvalds err = loop_get_status(lo, &info64); 14281da177e4SLinus Torvalds if (!err) 14291da177e4SLinus Torvalds err = loop_info64_to_old(&info64, &info); 14301da177e4SLinus Torvalds if (!err && copy_to_user(arg, &info, sizeof(info))) 14311da177e4SLinus Torvalds err = -EFAULT; 14321da177e4SLinus Torvalds 14331da177e4SLinus Torvalds return err; 14341da177e4SLinus Torvalds } 14351da177e4SLinus Torvalds 14361da177e4SLinus Torvalds static int 14371da177e4SLinus Torvalds loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { 14381da177e4SLinus Torvalds struct loop_info64 info64; 1439bdac616dSOmar Sandoval int err; 14401da177e4SLinus Torvalds 14414a5ce9baSJan Kara if (!arg) 1442bdac616dSOmar Sandoval return -EINVAL; 14431da177e4SLinus Torvalds err = loop_get_status(lo, &info64); 14441da177e4SLinus Torvalds if (!err && copy_to_user(arg, &info64, sizeof(info64))) 14451da177e4SLinus Torvalds err = -EFAULT; 14461da177e4SLinus Torvalds 14471da177e4SLinus Torvalds return err; 14481da177e4SLinus Torvalds } 14491da177e4SLinus Torvalds 145051001b7dSHannes Reinecke static int loop_set_capacity(struct loop_device *lo) 145153d66608SJ. R. Okajima { 14520a6ed1b5SMartijn Coenen loff_t size; 14530a6ed1b5SMartijn Coenen 145453d66608SJ. R. Okajima if (unlikely(lo->lo_state != Lo_bound)) 14557b0576a3SGuo Chao return -ENXIO; 145653d66608SJ. R. Okajima 14570a6ed1b5SMartijn Coenen size = get_loop_size(lo, lo->lo_backing_file); 14580a6ed1b5SMartijn Coenen loop_set_size(lo, size); 1459083a6a50SMartijn Coenen 1460083a6a50SMartijn Coenen return 0; 146153d66608SJ. R. Okajima } 146253d66608SJ. R. Okajima 1463ab1cb278SMing Lei static int loop_set_dio(struct loop_device *lo, unsigned long arg) 1464ab1cb278SMing Lei { 1465dc909525SChristoph Hellwig bool use_dio = !!arg; 14661e1a9cecSChristoph Hellwig unsigned int memflags; 1467ab1cb278SMing Lei 1468dc909525SChristoph Hellwig if (lo->lo_state != Lo_bound) 1469dc909525SChristoph Hellwig return -ENXIO; 1470afd69d5cSChristoph Hellwig if (use_dio == !!(lo->lo_flags & LO_FLAGS_DIRECT_IO)) 1471ab1cb278SMing Lei return 0; 1472dc909525SChristoph Hellwig 1473dc909525SChristoph Hellwig if (use_dio) { 1474dc909525SChristoph Hellwig if (!lo_can_use_dio(lo)) 1475dc909525SChristoph Hellwig return -EINVAL; 1476dc909525SChristoph Hellwig /* flush dirty pages before starting to use direct I/O */ 1477dc909525SChristoph Hellwig vfs_fsync(lo->lo_backing_file, 0); 1478dc909525SChristoph Hellwig } 1479dc909525SChristoph Hellwig 14801e1a9cecSChristoph Hellwig memflags = blk_mq_freeze_queue(lo->lo_queue); 1481dc909525SChristoph Hellwig if (use_dio) 1482dc909525SChristoph Hellwig lo->lo_flags |= LO_FLAGS_DIRECT_IO; 1483dc909525SChristoph Hellwig else 1484dc909525SChristoph Hellwig lo->lo_flags &= ~LO_FLAGS_DIRECT_IO; 14851e1a9cecSChristoph Hellwig blk_mq_unfreeze_queue(lo->lo_queue, memflags); 1486dc909525SChristoph Hellwig return 0; 1487ab1cb278SMing Lei } 1488ab1cb278SMing Lei 148989e4fdecSOmar Sandoval static int loop_set_block_size(struct loop_device *lo, unsigned long arg) 149089e4fdecSOmar Sandoval { 1491b38c8be2SChristoph Hellwig struct queue_limits lim; 14921e1a9cecSChristoph Hellwig unsigned int memflags; 14935db470e2SJaegeuk Kim int err = 0; 14945db470e2SJaegeuk Kim 149589e4fdecSOmar Sandoval if (lo->lo_state != Lo_bound) 149689e4fdecSOmar Sandoval return -ENXIO; 149789e4fdecSOmar Sandoval 14987e81f99aSMartijn Coenen if (lo->lo_queue->limits.logical_block_size == arg) 14997e81f99aSMartijn Coenen return 0; 15007e81f99aSMartijn Coenen 15015db470e2SJaegeuk Kim sync_blockdev(lo->lo_device); 1502f4bd34b1SZheng Bin invalidate_bdev(lo->lo_device); 15035db470e2SJaegeuk Kim 1504b38c8be2SChristoph Hellwig lim = queue_limits_start_update(lo->lo_queue); 1505b38c8be2SChristoph Hellwig loop_update_limits(lo, &lim, arg); 1506b03732a9SChristoph Hellwig 15071e1a9cecSChristoph Hellwig memflags = blk_mq_freeze_queue(lo->lo_queue); 1508b38c8be2SChristoph Hellwig err = queue_limits_commit_update(lo->lo_queue, &lim); 150989e4fdecSOmar Sandoval loop_update_dio(lo); 15101e1a9cecSChristoph Hellwig blk_mq_unfreeze_queue(lo->lo_queue, memflags); 151189e4fdecSOmar Sandoval 15125db470e2SJaegeuk Kim return err; 151389e4fdecSOmar Sandoval } 151489e4fdecSOmar Sandoval 1515a1316544SJan Kara static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, 1516a1316544SJan Kara unsigned long arg) 15171da177e4SLinus Torvalds { 15181da177e4SLinus Torvalds int err; 15191da177e4SLinus Torvalds 15206cc8e743SPavel Tatashin err = mutex_lock_killable(&lo->lo_mutex); 15213148ffbdSOmar Sandoval if (err) 1522a1316544SJan Kara return err; 15231da177e4SLinus Torvalds switch (cmd) { 152453d66608SJ. R. Okajima case LOOP_SET_CAPACITY: 152551001b7dSHannes Reinecke err = loop_set_capacity(lo); 152653d66608SJ. R. Okajima break; 1527ab1cb278SMing Lei case LOOP_SET_DIRECT_IO: 1528ab1cb278SMing Lei err = loop_set_dio(lo, arg); 1529ab1cb278SMing Lei break; 153089e4fdecSOmar Sandoval case LOOP_SET_BLOCK_SIZE: 153189e4fdecSOmar Sandoval err = loop_set_block_size(lo, arg); 153289e4fdecSOmar Sandoval break; 15331da177e4SLinus Torvalds default: 153447e96246SChristoph Hellwig err = -EINVAL; 15351da177e4SLinus Torvalds } 15366cc8e743SPavel Tatashin mutex_unlock(&lo->lo_mutex); 1537a1316544SJan Kara return err; 1538a1316544SJan Kara } 1539f028f3b2SNikanth Karthikesan 154005bdb996SChristoph Hellwig static int lo_ioctl(struct block_device *bdev, blk_mode_t mode, 1541a1316544SJan Kara unsigned int cmd, unsigned long arg) 1542a1316544SJan Kara { 1543a1316544SJan Kara struct loop_device *lo = bdev->bd_disk->private_data; 1544571fae6eSMartijn Coenen void __user *argp = (void __user *) arg; 1545a1316544SJan Kara int err; 1546a1316544SJan Kara 1547a1316544SJan Kara switch (cmd) { 15483448914eSMartijn Coenen case LOOP_SET_FD: { 15493448914eSMartijn Coenen /* 15503448914eSMartijn Coenen * Legacy case - pass in a zeroed out struct loop_config with 15513448914eSMartijn Coenen * only the file descriptor set , which corresponds with the 15523448914eSMartijn Coenen * default parameters we'd have used otherwise. 15533448914eSMartijn Coenen */ 15543448914eSMartijn Coenen struct loop_config config; 15553448914eSMartijn Coenen 15563448914eSMartijn Coenen memset(&config, 0, sizeof(config)); 15573448914eSMartijn Coenen config.fd = arg; 15583448914eSMartijn Coenen 15593448914eSMartijn Coenen return loop_configure(lo, mode, bdev, &config); 15603448914eSMartijn Coenen } 15613448914eSMartijn Coenen case LOOP_CONFIGURE: { 15623448914eSMartijn Coenen struct loop_config config; 15633448914eSMartijn Coenen 15643448914eSMartijn Coenen if (copy_from_user(&config, argp, sizeof(config))) 15653448914eSMartijn Coenen return -EFAULT; 15663448914eSMartijn Coenen 15673448914eSMartijn Coenen return loop_configure(lo, mode, bdev, &config); 15683448914eSMartijn Coenen } 1569a1316544SJan Kara case LOOP_CHANGE_FD: 1570c3710770SJan Kara return loop_change_fd(lo, bdev, arg); 1571a1316544SJan Kara case LOOP_CLR_FD: 15727ccd0791SJan Kara return loop_clr_fd(lo); 1573a1316544SJan Kara case LOOP_SET_STATUS: 1574a1316544SJan Kara err = -EPERM; 157505bdb996SChristoph Hellwig if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN)) 1576571fae6eSMartijn Coenen err = loop_set_status_old(lo, argp); 1577a1316544SJan Kara break; 1578a1316544SJan Kara case LOOP_GET_STATUS: 1579571fae6eSMartijn Coenen return loop_get_status_old(lo, argp); 1580a1316544SJan Kara case LOOP_SET_STATUS64: 1581a1316544SJan Kara err = -EPERM; 158205bdb996SChristoph Hellwig if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN)) 1583571fae6eSMartijn Coenen err = loop_set_status64(lo, argp); 1584a1316544SJan Kara break; 1585a1316544SJan Kara case LOOP_GET_STATUS64: 1586571fae6eSMartijn Coenen return loop_get_status64(lo, argp); 1587a1316544SJan Kara case LOOP_SET_CAPACITY: 1588a1316544SJan Kara case LOOP_SET_DIRECT_IO: 1589a1316544SJan Kara case LOOP_SET_BLOCK_SIZE: 159005bdb996SChristoph Hellwig if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN)) 1591a1316544SJan Kara return -EPERM; 1592df561f66SGustavo A. R. Silva fallthrough; 1593a1316544SJan Kara default: 1594a1316544SJan Kara err = lo_simple_ioctl(lo, cmd, arg); 1595a1316544SJan Kara break; 1596a1316544SJan Kara } 1597a1316544SJan Kara 15981da177e4SLinus Torvalds return err; 15991da177e4SLinus Torvalds } 16001da177e4SLinus Torvalds 1601863d5b82SDavid Howells #ifdef CONFIG_COMPAT 1602863d5b82SDavid Howells struct compat_loop_info { 1603863d5b82SDavid Howells compat_int_t lo_number; /* ioctl r/o */ 1604863d5b82SDavid Howells compat_dev_t lo_device; /* ioctl r/o */ 1605863d5b82SDavid Howells compat_ulong_t lo_inode; /* ioctl r/o */ 1606863d5b82SDavid Howells compat_dev_t lo_rdevice; /* ioctl r/o */ 1607863d5b82SDavid Howells compat_int_t lo_offset; 1608f941c51eSCarlos Llamas compat_int_t lo_encrypt_type; /* obsolete, ignored */ 1609863d5b82SDavid Howells compat_int_t lo_encrypt_key_size; /* ioctl w/o */ 1610863d5b82SDavid Howells compat_int_t lo_flags; /* ioctl r/o */ 1611863d5b82SDavid Howells char lo_name[LO_NAME_SIZE]; 1612863d5b82SDavid Howells unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ 1613863d5b82SDavid Howells compat_ulong_t lo_init[2]; 1614863d5b82SDavid Howells char reserved[4]; 1615863d5b82SDavid Howells }; 1616863d5b82SDavid Howells 1617863d5b82SDavid Howells /* 1618863d5b82SDavid Howells * Transfer 32-bit compatibility structure in userspace to 64-bit loop info 1619863d5b82SDavid Howells * - noinlined to reduce stack space usage in main part of driver 1620863d5b82SDavid Howells */ 1621863d5b82SDavid Howells static noinline int 1622ba674cfcSAl Viro loop_info64_from_compat(const struct compat_loop_info __user *arg, 1623863d5b82SDavid Howells struct loop_info64 *info64) 1624863d5b82SDavid Howells { 1625863d5b82SDavid Howells struct compat_loop_info info; 1626863d5b82SDavid Howells 1627863d5b82SDavid Howells if (copy_from_user(&info, arg, sizeof(info))) 1628863d5b82SDavid Howells return -EFAULT; 1629863d5b82SDavid Howells 1630863d5b82SDavid Howells memset(info64, 0, sizeof(*info64)); 1631863d5b82SDavid Howells info64->lo_number = info.lo_number; 1632863d5b82SDavid Howells info64->lo_device = info.lo_device; 1633863d5b82SDavid Howells info64->lo_inode = info.lo_inode; 1634863d5b82SDavid Howells info64->lo_rdevice = info.lo_rdevice; 1635863d5b82SDavid Howells info64->lo_offset = info.lo_offset; 1636863d5b82SDavid Howells info64->lo_sizelimit = 0; 1637863d5b82SDavid Howells info64->lo_flags = info.lo_flags; 1638863d5b82SDavid Howells memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE); 1639863d5b82SDavid Howells return 0; 1640863d5b82SDavid Howells } 1641863d5b82SDavid Howells 1642863d5b82SDavid Howells /* 1643863d5b82SDavid Howells * Transfer 64-bit loop info to 32-bit compatibility structure in userspace 1644863d5b82SDavid Howells * - noinlined to reduce stack space usage in main part of driver 1645863d5b82SDavid Howells */ 1646863d5b82SDavid Howells static noinline int 1647863d5b82SDavid Howells loop_info64_to_compat(const struct loop_info64 *info64, 1648863d5b82SDavid Howells struct compat_loop_info __user *arg) 1649863d5b82SDavid Howells { 1650863d5b82SDavid Howells struct compat_loop_info info; 1651863d5b82SDavid Howells 1652863d5b82SDavid Howells memset(&info, 0, sizeof(info)); 1653863d5b82SDavid Howells info.lo_number = info64->lo_number; 1654863d5b82SDavid Howells info.lo_device = info64->lo_device; 1655863d5b82SDavid Howells info.lo_inode = info64->lo_inode; 1656863d5b82SDavid Howells info.lo_rdevice = info64->lo_rdevice; 1657863d5b82SDavid Howells info.lo_offset = info64->lo_offset; 1658863d5b82SDavid Howells info.lo_flags = info64->lo_flags; 1659863d5b82SDavid Howells memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE); 1660863d5b82SDavid Howells 1661863d5b82SDavid Howells /* error in case values were truncated */ 1662863d5b82SDavid Howells if (info.lo_device != info64->lo_device || 1663863d5b82SDavid Howells info.lo_rdevice != info64->lo_rdevice || 1664863d5b82SDavid Howells info.lo_inode != info64->lo_inode || 166547e96246SChristoph Hellwig info.lo_offset != info64->lo_offset) 1666863d5b82SDavid Howells return -EOVERFLOW; 1667863d5b82SDavid Howells 1668863d5b82SDavid Howells if (copy_to_user(arg, &info, sizeof(info))) 1669863d5b82SDavid Howells return -EFAULT; 1670863d5b82SDavid Howells return 0; 1671863d5b82SDavid Howells } 1672863d5b82SDavid Howells 1673863d5b82SDavid Howells static int 1674863d5b82SDavid Howells loop_set_status_compat(struct loop_device *lo, 1675863d5b82SDavid Howells const struct compat_loop_info __user *arg) 1676863d5b82SDavid Howells { 1677863d5b82SDavid Howells struct loop_info64 info64; 1678863d5b82SDavid Howells int ret; 1679863d5b82SDavid Howells 1680863d5b82SDavid Howells ret = loop_info64_from_compat(arg, &info64); 1681863d5b82SDavid Howells if (ret < 0) 1682863d5b82SDavid Howells return ret; 1683863d5b82SDavid Howells return loop_set_status(lo, &info64); 1684863d5b82SDavid Howells } 1685863d5b82SDavid Howells 1686863d5b82SDavid Howells static int 1687863d5b82SDavid Howells loop_get_status_compat(struct loop_device *lo, 1688863d5b82SDavid Howells struct compat_loop_info __user *arg) 1689863d5b82SDavid Howells { 1690863d5b82SDavid Howells struct loop_info64 info64; 1691bdac616dSOmar Sandoval int err; 1692863d5b82SDavid Howells 16934a5ce9baSJan Kara if (!arg) 1694bdac616dSOmar Sandoval return -EINVAL; 1695863d5b82SDavid Howells err = loop_get_status(lo, &info64); 1696863d5b82SDavid Howells if (!err) 1697863d5b82SDavid Howells err = loop_info64_to_compat(&info64, arg); 1698863d5b82SDavid Howells return err; 1699863d5b82SDavid Howells } 1700863d5b82SDavid Howells 170105bdb996SChristoph Hellwig static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode, 1702bb214884SAl Viro unsigned int cmd, unsigned long arg) 1703863d5b82SDavid Howells { 1704bb214884SAl Viro struct loop_device *lo = bdev->bd_disk->private_data; 1705863d5b82SDavid Howells int err; 1706863d5b82SDavid Howells 1707863d5b82SDavid Howells switch(cmd) { 1708863d5b82SDavid Howells case LOOP_SET_STATUS: 17093148ffbdSOmar Sandoval err = loop_set_status_compat(lo, 17103148ffbdSOmar Sandoval (const struct compat_loop_info __user *)arg); 1711863d5b82SDavid Howells break; 1712863d5b82SDavid Howells case LOOP_GET_STATUS: 17133148ffbdSOmar Sandoval err = loop_get_status_compat(lo, 17143148ffbdSOmar Sandoval (struct compat_loop_info __user *)arg); 1715863d5b82SDavid Howells break; 171653d66608SJ. R. Okajima case LOOP_SET_CAPACITY: 1717863d5b82SDavid Howells case LOOP_CLR_FD: 1718863d5b82SDavid Howells case LOOP_GET_STATUS64: 1719863d5b82SDavid Howells case LOOP_SET_STATUS64: 17203448914eSMartijn Coenen case LOOP_CONFIGURE: 1721863d5b82SDavid Howells arg = (unsigned long) compat_ptr(arg); 1722df561f66SGustavo A. R. Silva fallthrough; 1723863d5b82SDavid Howells case LOOP_SET_FD: 1724863d5b82SDavid Howells case LOOP_CHANGE_FD: 17259fea4b39SEvan Green case LOOP_SET_BLOCK_SIZE: 1726fdbe4eeeSAlessio Balsini case LOOP_SET_DIRECT_IO: 1727bb214884SAl Viro err = lo_ioctl(bdev, mode, cmd, arg); 1728863d5b82SDavid Howells break; 1729863d5b82SDavid Howells default: 1730863d5b82SDavid Howells err = -ENOIOCTLCMD; 1731863d5b82SDavid Howells break; 1732863d5b82SDavid Howells } 1733863d5b82SDavid Howells return err; 1734863d5b82SDavid Howells } 1735863d5b82SDavid Howells #endif 1736863d5b82SDavid Howells 173718048c1aSGulam Mohamed static int lo_open(struct gendisk *disk, blk_mode_t mode) 173818048c1aSGulam Mohamed { 173918048c1aSGulam Mohamed struct loop_device *lo = disk->private_data; 174018048c1aSGulam Mohamed int err; 174118048c1aSGulam Mohamed 174218048c1aSGulam Mohamed err = mutex_lock_killable(&lo->lo_mutex); 174318048c1aSGulam Mohamed if (err) 174418048c1aSGulam Mohamed return err; 174518048c1aSGulam Mohamed 174618048c1aSGulam Mohamed if (lo->lo_state == Lo_deleting || lo->lo_state == Lo_rundown) 174718048c1aSGulam Mohamed err = -ENXIO; 174818048c1aSGulam Mohamed mutex_unlock(&lo->lo_mutex); 174918048c1aSGulam Mohamed return err; 175018048c1aSGulam Mohamed } 175118048c1aSGulam Mohamed 1752ae220766SChristoph Hellwig static void lo_release(struct gendisk *disk) 17531da177e4SLinus Torvalds { 17546cc8e743SPavel Tatashin struct loop_device *lo = disk->private_data; 175518048c1aSGulam Mohamed bool need_clear = false; 17561da177e4SLinus Torvalds 1757a0e286b6SChristoph Hellwig if (disk_openers(disk) > 0) 1758a0e286b6SChristoph Hellwig return; 175918048c1aSGulam Mohamed /* 176018048c1aSGulam Mohamed * Clear the backing device information if this is the last close of 176118048c1aSGulam Mohamed * a device that's been marked for auto clear, or on which LOOP_CLR_FD 176218048c1aSGulam Mohamed * has been called. 176318048c1aSGulam Mohamed */ 1764f8933667SMing Lei 1765a0e286b6SChristoph Hellwig mutex_lock(&lo->lo_mutex); 176618048c1aSGulam Mohamed if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR)) 1767a2505b79SJan Kara lo->lo_state = Lo_rundown; 176818048c1aSGulam Mohamed 176918048c1aSGulam Mohamed need_clear = (lo->lo_state == Lo_rundown); 17706cc8e743SPavel Tatashin mutex_unlock(&lo->lo_mutex); 177118048c1aSGulam Mohamed 177218048c1aSGulam Mohamed if (need_clear) 177318048c1aSGulam Mohamed __loop_clr_fd(lo); 177414f27939SMilan Broz } 177596c58655SDavid Woodhouse 1776d2c7f56fSChristoph Hellwig static void lo_free_disk(struct gendisk *disk) 1777d2c7f56fSChristoph Hellwig { 1778d2c7f56fSChristoph Hellwig struct loop_device *lo = disk->private_data; 1779d2c7f56fSChristoph Hellwig 1780d292dc80SChristoph Hellwig if (lo->workqueue) 1781d292dc80SChristoph Hellwig destroy_workqueue(lo->workqueue); 1782d292dc80SChristoph Hellwig loop_free_idle_workers(lo, true); 1783292a089dSSteven Rostedt (Google) timer_shutdown_sync(&lo->timer); 1784d2c7f56fSChristoph Hellwig mutex_destroy(&lo->lo_mutex); 1785d2c7f56fSChristoph Hellwig kfree(lo); 1786ae665016SLinus Torvalds } 1787ae665016SLinus Torvalds 178883d5cde4SAlexey Dobriyan static const struct block_device_operations lo_fops = { 17891da177e4SLinus Torvalds .owner = THIS_MODULE, 179018048c1aSGulam Mohamed .open = lo_open, 1791bb214884SAl Viro .release = lo_release, 1792bb214884SAl Viro .ioctl = lo_ioctl, 1793863d5b82SDavid Howells #ifdef CONFIG_COMPAT 1794bb214884SAl Viro .compat_ioctl = lo_compat_ioctl, 1795863d5b82SDavid Howells #endif 1796d2c7f56fSChristoph Hellwig .free_disk = lo_free_disk, 17971da177e4SLinus Torvalds }; 17981da177e4SLinus Torvalds 17991da177e4SLinus Torvalds /* 18001da177e4SLinus Torvalds * And now the modules code and kernel interface. 18011da177e4SLinus Torvalds */ 180285c50197SIsaac J. Manjarres 180385c50197SIsaac J. Manjarres /* 180485c50197SIsaac J. Manjarres * If max_loop is specified, create that many devices upfront. 180585c50197SIsaac J. Manjarres * This also becomes a hard limit. If max_loop is not specified, 1806bb5faa99SMauricio Faria de Oliveira * the default isn't a hard limit (as before commit 85c50197716c 1807bb5faa99SMauricio Faria de Oliveira * changed the default value from 0 for max_loop=0 reasons), just 180885c50197SIsaac J. Manjarres * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module 180985c50197SIsaac J. Manjarres * init time. Loop devices can be requested on-demand with the 181085c50197SIsaac J. Manjarres * /dev/loop-control interface, or be instantiated by accessing 181185c50197SIsaac J. Manjarres * a 'dead' device node. 181285c50197SIsaac J. Manjarres */ 181385c50197SIsaac J. Manjarres static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT; 1814bb5faa99SMauricio Faria de Oliveira 1815bb5faa99SMauricio Faria de Oliveira #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD 1816bb5faa99SMauricio Faria de Oliveira static bool max_loop_specified; 1817bb5faa99SMauricio Faria de Oliveira 1818bb5faa99SMauricio Faria de Oliveira static int max_loop_param_set_int(const char *val, 1819bb5faa99SMauricio Faria de Oliveira const struct kernel_param *kp) 1820bb5faa99SMauricio Faria de Oliveira { 1821bb5faa99SMauricio Faria de Oliveira int ret; 1822bb5faa99SMauricio Faria de Oliveira 1823bb5faa99SMauricio Faria de Oliveira ret = param_set_int(val, kp); 1824bb5faa99SMauricio Faria de Oliveira if (ret < 0) 1825bb5faa99SMauricio Faria de Oliveira return ret; 1826bb5faa99SMauricio Faria de Oliveira 1827bb5faa99SMauricio Faria de Oliveira max_loop_specified = true; 1828bb5faa99SMauricio Faria de Oliveira return 0; 1829bb5faa99SMauricio Faria de Oliveira } 1830bb5faa99SMauricio Faria de Oliveira 1831bb5faa99SMauricio Faria de Oliveira static const struct kernel_param_ops max_loop_param_ops = { 1832bb5faa99SMauricio Faria de Oliveira .set = max_loop_param_set_int, 1833bb5faa99SMauricio Faria de Oliveira .get = param_get_int, 1834bb5faa99SMauricio Faria de Oliveira }; 1835bb5faa99SMauricio Faria de Oliveira 1836bb5faa99SMauricio Faria de Oliveira module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444); 1837a47653fcSKen Chen MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); 1838bb5faa99SMauricio Faria de Oliveira #else 1839bb5faa99SMauricio Faria de Oliveira module_param(max_loop, int, 0444); 1840bb5faa99SMauricio Faria de Oliveira MODULE_PARM_DESC(max_loop, "Initial number of loop devices"); 1841bb5faa99SMauricio Faria de Oliveira #endif 1842bb5faa99SMauricio Faria de Oliveira 18435657a819SJoe Perches module_param(max_part, int, 0444); 1844476a4813SLaurent Vivier MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device"); 1845ef44c508SChaitanya Kulkarni 1846ef44c508SChaitanya Kulkarni static int hw_queue_depth = LOOP_DEFAULT_HW_Q_DEPTH; 1847ef44c508SChaitanya Kulkarni 1848ef44c508SChaitanya Kulkarni static int loop_set_hw_queue_depth(const char *s, const struct kernel_param *p) 1849ef44c508SChaitanya Kulkarni { 1850e152a05fSBart Van Assche int qd, ret; 1851ef44c508SChaitanya Kulkarni 1852e152a05fSBart Van Assche ret = kstrtoint(s, 0, &qd); 1853e152a05fSBart Van Assche if (ret < 0) 1854e152a05fSBart Van Assche return ret; 1855e152a05fSBart Van Assche if (qd < 1) 1856e152a05fSBart Van Assche return -EINVAL; 1857e152a05fSBart Van Assche hw_queue_depth = qd; 1858e152a05fSBart Van Assche return 0; 1859ef44c508SChaitanya Kulkarni } 1860ef44c508SChaitanya Kulkarni 1861ef44c508SChaitanya Kulkarni static const struct kernel_param_ops loop_hw_qdepth_param_ops = { 1862ef44c508SChaitanya Kulkarni .set = loop_set_hw_queue_depth, 1863ef44c508SChaitanya Kulkarni .get = param_get_int, 1864ef44c508SChaitanya Kulkarni }; 1865ef44c508SChaitanya Kulkarni 1866ef44c508SChaitanya Kulkarni device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444); 1867e152a05fSBart Van Assche MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: " __stringify(LOOP_DEFAULT_HW_Q_DEPTH)); 1868ef44c508SChaitanya Kulkarni 18697d4425d2SJeff Johnson MODULE_DESCRIPTION("Loopback device support"); 18701da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 18711da177e4SLinus Torvalds MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); 18721da177e4SLinus Torvalds 1873fc17b653SChristoph Hellwig static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, 1874b5dd2f60SMing Lei const struct blk_mq_queue_data *bd) 1875b5dd2f60SMing Lei { 18761894e916SJens Axboe struct request *rq = bd->rq; 18771894e916SJens Axboe struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); 18781894e916SJens Axboe struct loop_device *lo = rq->q->queuedata; 1879b5dd2f60SMing Lei 18801894e916SJens Axboe blk_mq_start_request(rq); 1881b5dd2f60SMing Lei 1882f4aa4c7bSMing Lei if (lo->lo_state != Lo_bound) 1883fc17b653SChristoph Hellwig return BLK_STS_IOERR; 1884f4aa4c7bSMing Lei 18851894e916SJens Axboe switch (req_op(rq)) { 1886f0225cacSChristoph Hellwig case REQ_OP_FLUSH: 1887f0225cacSChristoph Hellwig case REQ_OP_DISCARD: 188819372e27SChristoph Hellwig case REQ_OP_WRITE_ZEROES: 1889bc07c10aSMing Lei cmd->use_aio = false; 1890f0225cacSChristoph Hellwig break; 1891f0225cacSChristoph Hellwig default: 1892afd69d5cSChristoph Hellwig cmd->use_aio = lo->lo_flags & LO_FLAGS_DIRECT_IO; 1893f0225cacSChristoph Hellwig break; 1894f0225cacSChristoph Hellwig } 1895bc07c10aSMing Lei 1896d4478e92SShaohua Li /* always use the first bio's css */ 1897c74d40e8SDan Schatzberg cmd->blkcg_css = NULL; 1898c74d40e8SDan Schatzberg cmd->memcg_css = NULL; 18990b508bc9SShaohua Li #ifdef CONFIG_BLK_CGROUP 1900bbb1ebe7SChristoph Hellwig if (rq->bio) { 1901bbb1ebe7SChristoph Hellwig cmd->blkcg_css = bio_blkcg_css(rq->bio); 1902c74d40e8SDan Schatzberg #ifdef CONFIG_MEMCG 1903bbb1ebe7SChristoph Hellwig if (cmd->blkcg_css) { 1904c74d40e8SDan Schatzberg cmd->memcg_css = 1905c74d40e8SDan Schatzberg cgroup_get_e_css(cmd->blkcg_css->cgroup, 1906c74d40e8SDan Schatzberg &memory_cgrp_subsys); 1907bbb1ebe7SChristoph Hellwig } 1908d4478e92SShaohua Li #endif 1909c74d40e8SDan Schatzberg } 1910c74d40e8SDan Schatzberg #endif 191187579e9bSDan Schatzberg loop_queue_work(lo, cmd); 1912b5dd2f60SMing Lei 1913fc17b653SChristoph Hellwig return BLK_STS_OK; 1914b5dd2f60SMing Lei } 1915b5dd2f60SMing Lei 1916b5dd2f60SMing Lei static void loop_handle_cmd(struct loop_cmd *cmd) 1917b5dd2f60SMing Lei { 19189b0cb770SBart Van Assche struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css; 19199b0cb770SBart Van Assche struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css; 19201894e916SJens Axboe struct request *rq = blk_mq_rq_from_pdu(cmd); 19211894e916SJens Axboe const bool write = op_is_write(req_op(rq)); 19221894e916SJens Axboe struct loop_device *lo = rq->q->queuedata; 1923f4829a9bSChristoph Hellwig int ret = 0; 1924c74d40e8SDan Schatzberg struct mem_cgroup *old_memcg = NULL; 19259b0cb770SBart Van Assche const bool use_aio = cmd->use_aio; 1926b5dd2f60SMing Lei 1927f4829a9bSChristoph Hellwig if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { 1928f4829a9bSChristoph Hellwig ret = -EIO; 1929b5dd2f60SMing Lei goto failed; 1930f4829a9bSChristoph Hellwig } 1931b5dd2f60SMing Lei 19329b0cb770SBart Van Assche if (cmd_blkcg_css) 19339b0cb770SBart Van Assche kthread_associate_blkcg(cmd_blkcg_css); 19349b0cb770SBart Van Assche if (cmd_memcg_css) 1935c74d40e8SDan Schatzberg old_memcg = set_active_memcg( 19369b0cb770SBart Van Assche mem_cgroup_from_css(cmd_memcg_css)); 1937c74d40e8SDan Schatzberg 19389b0cb770SBart Van Assche /* 19399b0cb770SBart Van Assche * do_req_filebacked() may call blk_mq_complete_request() synchronously 19409b0cb770SBart Van Assche * or asynchronously if using aio. Hence, do not touch 'cmd' after 19419b0cb770SBart Van Assche * do_req_filebacked() has returned unless we are sure that 'cmd' has 19429b0cb770SBart Van Assche * not yet been completed. 19439b0cb770SBart Van Assche */ 19441894e916SJens Axboe ret = do_req_filebacked(lo, rq); 1945c74d40e8SDan Schatzberg 19469b0cb770SBart Van Assche if (cmd_blkcg_css) 1947c74d40e8SDan Schatzberg kthread_associate_blkcg(NULL); 1948c74d40e8SDan Schatzberg 19499b0cb770SBart Van Assche if (cmd_memcg_css) { 1950c74d40e8SDan Schatzberg set_active_memcg(old_memcg); 19519b0cb770SBart Van Assche css_put(cmd_memcg_css); 1952c74d40e8SDan Schatzberg } 1953b5dd2f60SMing Lei failed: 1954bc07c10aSMing Lei /* complete non-aio request */ 19559b0cb770SBart Van Assche if (!use_aio || ret) { 19568cd55087SEvan Green if (ret == -EOPNOTSUPP) 19578cd55087SEvan Green cmd->ret = ret; 19588cd55087SEvan Green else 1959fe2cb290SChristoph Hellwig cmd->ret = ret ? -EIO : 0; 196015f73f5bSChristoph Hellwig if (likely(!blk_should_fake_timeout(rq->q))) 19611894e916SJens Axboe blk_mq_complete_request(rq); 1962fe2cb290SChristoph Hellwig } 1963b5dd2f60SMing Lei } 1964b5dd2f60SMing Lei 196587579e9bSDan Schatzberg static void loop_process_work(struct loop_worker *worker, 196687579e9bSDan Schatzberg struct list_head *cmd_list, struct loop_device *lo) 1967b5dd2f60SMing Lei { 196887579e9bSDan Schatzberg int orig_flags = current->flags; 196987579e9bSDan Schatzberg struct loop_cmd *cmd; 1970b5dd2f60SMing Lei 197187579e9bSDan Schatzberg current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO; 197287579e9bSDan Schatzberg spin_lock_irq(&lo->lo_work_lock); 197387579e9bSDan Schatzberg while (!list_empty(cmd_list)) { 197487579e9bSDan Schatzberg cmd = container_of( 197587579e9bSDan Schatzberg cmd_list->next, struct loop_cmd, list_entry); 197687579e9bSDan Schatzberg list_del(cmd_list->next); 197787579e9bSDan Schatzberg spin_unlock_irq(&lo->lo_work_lock); 197887579e9bSDan Schatzberg 197987579e9bSDan Schatzberg loop_handle_cmd(cmd); 198087579e9bSDan Schatzberg cond_resched(); 198187579e9bSDan Schatzberg 198287579e9bSDan Schatzberg spin_lock_irq(&lo->lo_work_lock); 198387579e9bSDan Schatzberg } 198487579e9bSDan Schatzberg 198587579e9bSDan Schatzberg /* 198687579e9bSDan Schatzberg * We only add to the idle list if there are no pending cmds 198787579e9bSDan Schatzberg * *and* the worker will not run again which ensures that it 198887579e9bSDan Schatzberg * is safe to free any worker on the idle list 198987579e9bSDan Schatzberg */ 199087579e9bSDan Schatzberg if (worker && !work_pending(&worker->work)) { 199187579e9bSDan Schatzberg worker->last_ran_at = jiffies; 199287579e9bSDan Schatzberg list_add_tail(&worker->idle_list, &lo->idle_worker_list); 199387579e9bSDan Schatzberg loop_set_timer(lo); 199487579e9bSDan Schatzberg } 199587579e9bSDan Schatzberg spin_unlock_irq(&lo->lo_work_lock); 199687579e9bSDan Schatzberg current->flags = orig_flags; 199787579e9bSDan Schatzberg } 199887579e9bSDan Schatzberg 199987579e9bSDan Schatzberg static void loop_workfn(struct work_struct *work) 200087579e9bSDan Schatzberg { 200187579e9bSDan Schatzberg struct loop_worker *worker = 200287579e9bSDan Schatzberg container_of(work, struct loop_worker, work); 200387579e9bSDan Schatzberg loop_process_work(worker, &worker->cmd_list, worker->lo); 200487579e9bSDan Schatzberg } 200587579e9bSDan Schatzberg 200687579e9bSDan Schatzberg static void loop_rootcg_workfn(struct work_struct *work) 200787579e9bSDan Schatzberg { 200887579e9bSDan Schatzberg struct loop_device *lo = 200987579e9bSDan Schatzberg container_of(work, struct loop_device, rootcg_work); 201087579e9bSDan Schatzberg loop_process_work(NULL, &lo->rootcg_cmd_list, lo); 201187579e9bSDan Schatzberg } 201287579e9bSDan Schatzberg 2013f363b089SEric Biggers static const struct blk_mq_ops loop_mq_ops = { 2014b5dd2f60SMing Lei .queue_rq = loop_queue_rq, 2015fe2cb290SChristoph Hellwig .complete = lo_complete_rq, 2016b5dd2f60SMing Lei }; 2017b5dd2f60SMing Lei 2018d6da83d0SChristoph Hellwig static int loop_add(int i) 20191da177e4SLinus Torvalds { 202002aed4a1SChristoph Hellwig struct queue_limits lim = { 202102aed4a1SChristoph Hellwig /* 202202aed4a1SChristoph Hellwig * Random number picked from the historic block max_sectors cap. 202302aed4a1SChristoph Hellwig */ 202402aed4a1SChristoph Hellwig .max_hw_sectors = 2560u, 202502aed4a1SChristoph Hellwig }; 202673285082SKen Chen struct loop_device *lo; 202773285082SKen Chen struct gendisk *disk; 202834dd82afSKay Sievers int err; 20291da177e4SLinus Torvalds 203034dd82afSKay Sievers err = -ENOMEM; 203168d740d7SSilva Paulo lo = kzalloc(sizeof(*lo), GFP_KERNEL); 203268d740d7SSilva Paulo if (!lo) 203373285082SKen Chen goto out; 2034b15ed546SChristoph Hellwig lo->worker_tree = RB_ROOT; 2035b15ed546SChristoph Hellwig INIT_LIST_HEAD(&lo->idle_worker_list); 2036b15ed546SChristoph Hellwig timer_setup(&lo->timer, loop_free_idle_workers_timer, TIMER_DEFERRABLE); 2037ef7e7c82SMikulas Patocka lo->lo_state = Lo_unbound; 2038ef7e7c82SMikulas Patocka 203918d1f200SChristoph Hellwig err = mutex_lock_killable(&loop_ctl_mutex); 204018d1f200SChristoph Hellwig if (err) 204118d1f200SChristoph Hellwig goto out_free_dev; 204218d1f200SChristoph Hellwig 2043c718aa65STejun Heo /* allocate id, if @id >= 0, we're requesting that specific id */ 204434dd82afSKay Sievers if (i >= 0) { 2045c718aa65STejun Heo err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL); 2046c718aa65STejun Heo if (err == -ENOSPC) 204734dd82afSKay Sievers err = -EEXIST; 204834dd82afSKay Sievers } else { 2049c718aa65STejun Heo err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL); 205034dd82afSKay Sievers } 20511c500ad7STetsuo Handa mutex_unlock(&loop_ctl_mutex); 205234dd82afSKay Sievers if (err < 0) 20531c500ad7STetsuo Handa goto out_free_dev; 2054c718aa65STejun Heo i = err; 20551da177e4SLinus Torvalds 2056b5dd2f60SMing Lei lo->tag_set.ops = &loop_mq_ops; 2057b5dd2f60SMing Lei lo->tag_set.nr_hw_queues = 1; 2058ef44c508SChaitanya Kulkarni lo->tag_set.queue_depth = hw_queue_depth; 2059b5dd2f60SMing Lei lo->tag_set.numa_node = NUMA_NO_NODE; 2060b5dd2f60SMing Lei lo->tag_set.cmd_size = sizeof(struct loop_cmd); 2061cc76ace4SChristoph Hellwig lo->tag_set.flags = BLK_MQ_F_STACKING | BLK_MQ_F_NO_SCHED_BY_DEFAULT; 2062b5dd2f60SMing Lei lo->tag_set.driver_data = lo; 2063b5dd2f60SMing Lei 2064b5dd2f60SMing Lei err = blk_mq_alloc_tag_set(&lo->tag_set); 2065b5dd2f60SMing Lei if (err) 20663ec981e3SMikulas Patocka goto out_free_idr; 206773285082SKen Chen 206802aed4a1SChristoph Hellwig disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, &lim, lo); 20691c99502fSChristoph Hellwig if (IS_ERR(disk)) { 20701c99502fSChristoph Hellwig err = PTR_ERR(disk); 2071b5dd2f60SMing Lei goto out_cleanup_tags; 2072b5dd2f60SMing Lei } 20731c99502fSChristoph Hellwig lo->lo_queue = lo->lo_disk->queue; 2074ef7e7c82SMikulas Patocka 20755b5e20f4SMing Lei /* 2076e03c8dd1SKay Sievers * Disable partition scanning by default. The in-kernel partition 2077e03c8dd1SKay Sievers * scanning can be requested individually per-device during its 2078e03c8dd1SKay Sievers * setup. Userspace can always add and remove partitions from all 2079e03c8dd1SKay Sievers * devices. The needed partition minors are allocated from the 2080e03c8dd1SKay Sievers * extended minor space, the main loop device numbers will continue 2081e03c8dd1SKay Sievers * to match the loop minors, regardless of the number of partitions 2082e03c8dd1SKay Sievers * used. 2083e03c8dd1SKay Sievers * 2084e03c8dd1SKay Sievers * If max_part is given, partition scanning is globally enabled for 2085e03c8dd1SKay Sievers * all loop devices. The minors for the main loop devices will be 2086e03c8dd1SKay Sievers * multiples of max_part. 2087e03c8dd1SKay Sievers * 2088e03c8dd1SKay Sievers * Note: Global-for-all-devices, set-only-at-init, read-only module 2089e03c8dd1SKay Sievers * parameteters like 'max_loop' and 'max_part' make things needlessly 2090e03c8dd1SKay Sievers * complicated, are too static, inflexible and may surprise 2091e03c8dd1SKay Sievers * userspace tools. Parameters like this in general should be avoided. 2092e03c8dd1SKay Sievers */ 2093e03c8dd1SKay Sievers if (!part_shift) 2094b9684a71SChristoph Hellwig set_bit(GD_SUPPRESS_PART_SCAN, &disk->state); 20956cc8e743SPavel Tatashin mutex_init(&lo->lo_mutex); 20961da177e4SLinus Torvalds lo->lo_number = i; 20971da177e4SLinus Torvalds spin_lock_init(&lo->lo_lock); 209887579e9bSDan Schatzberg spin_lock_init(&lo->lo_work_lock); 2099d292dc80SChristoph Hellwig INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn); 2100d292dc80SChristoph Hellwig INIT_LIST_HEAD(&lo->rootcg_cmd_list); 21011da177e4SLinus Torvalds disk->major = LOOP_MAJOR; 2102476a4813SLaurent Vivier disk->first_minor = i << part_shift; 21031c99502fSChristoph Hellwig disk->minors = 1 << part_shift; 21041da177e4SLinus Torvalds disk->fops = &lo_fops; 21051da177e4SLinus Torvalds disk->private_data = lo; 21061da177e4SLinus Torvalds disk->queue = lo->lo_queue; 21079f65c489SMatteo Croce disk->events = DISK_EVENT_MEDIA_CHANGE; 21089f65c489SMatteo Croce disk->event_flags = DISK_EVENT_FLAG_UEVENT; 210973285082SKen Chen sprintf(disk->disk_name, "loop%d", i); 21101c500ad7STetsuo Handa /* Make this loop device reachable from pathname. */ 2111905705f0SLuis Chamberlain err = add_disk(disk); 2112905705f0SLuis Chamberlain if (err) 2113905705f0SLuis Chamberlain goto out_cleanup_disk; 2114905705f0SLuis Chamberlain 21151c500ad7STetsuo Handa /* Show this loop device. */ 21161c500ad7STetsuo Handa mutex_lock(&loop_ctl_mutex); 21171c500ad7STetsuo Handa lo->idr_visible = true; 211818d1f200SChristoph Hellwig mutex_unlock(&loop_ctl_mutex); 2119905705f0SLuis Chamberlain 212018d1f200SChristoph Hellwig return i; 212173285082SKen Chen 2122905705f0SLuis Chamberlain out_cleanup_disk: 21238b9ab626SChristoph Hellwig put_disk(disk); 2124b5dd2f60SMing Lei out_cleanup_tags: 2125b5dd2f60SMing Lei blk_mq_free_tag_set(&lo->tag_set); 21263ec981e3SMikulas Patocka out_free_idr: 21271c500ad7STetsuo Handa mutex_lock(&loop_ctl_mutex); 21283ec981e3SMikulas Patocka idr_remove(&loop_index_idr, i); 212918d1f200SChristoph Hellwig mutex_unlock(&loop_ctl_mutex); 213073285082SKen Chen out_free_dev: 213173285082SKen Chen kfree(lo); 213273285082SKen Chen out: 213334dd82afSKay Sievers return err; 21341da177e4SLinus Torvalds } 21351da177e4SLinus Torvalds 213634dd82afSKay Sievers static void loop_remove(struct loop_device *lo) 213773285082SKen Chen { 21381c500ad7STetsuo Handa /* Make this loop device unreachable from pathname. */ 21396cd18e71SNeilBrown del_gendisk(lo->lo_disk); 2140b5dd2f60SMing Lei blk_mq_free_tag_set(&lo->tag_set); 2141ce8d7861SChristoph Hellwig 21421c500ad7STetsuo Handa mutex_lock(&loop_ctl_mutex); 21431c500ad7STetsuo Handa idr_remove(&loop_index_idr, lo->lo_number); 21441c500ad7STetsuo Handa mutex_unlock(&loop_ctl_mutex); 2145d2c7f56fSChristoph Hellwig 2146d2c7f56fSChristoph Hellwig put_disk(lo->lo_disk); 214773285082SKen Chen } 214873285082SKen Chen 214923881aecSMauricio Faria de Oliveira #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD 21508410d38cSChristoph Hellwig static void loop_probe(dev_t dev) 215173285082SKen Chen { 21528410d38cSChristoph Hellwig int idx = MINOR(dev) >> part_shift; 21538410d38cSChristoph Hellwig 2154bb5faa99SMauricio Faria de Oliveira if (max_loop_specified && max_loop && idx >= max_loop) 21558410d38cSChristoph Hellwig return; 2156d6da83d0SChristoph Hellwig loop_add(idx); 2157f9d10764SChristoph Hellwig } 215823881aecSMauricio Faria de Oliveira #else 215923881aecSMauricio Faria de Oliveira #define loop_probe NULL 216023881aecSMauricio Faria de Oliveira #endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */ 2161f9d10764SChristoph Hellwig 2162f9d10764SChristoph Hellwig static int loop_control_remove(int idx) 2163770fe30aSKay Sievers { 2164770fe30aSKay Sievers struct loop_device *lo; 21650a42e99bSJan Kara int ret; 2166e5d66a10SChristoph Hellwig 2167e5d66a10SChristoph Hellwig if (idx < 0) { 2168e3f9387aSTetsuo Handa pr_warn_once("deleting an unspecified loop device is not supported.\n"); 2169e5d66a10SChristoph Hellwig return -EINVAL; 2170e5d66a10SChristoph Hellwig } 2171770fe30aSKay Sievers 21721c500ad7STetsuo Handa /* Hide this loop device for serialization. */ 21730a42e99bSJan Kara ret = mutex_lock_killable(&loop_ctl_mutex); 21740a42e99bSJan Kara if (ret) 21750a42e99bSJan Kara return ret; 2176b9848081SChristoph Hellwig lo = idr_find(&loop_index_idr, idx); 21771c500ad7STetsuo Handa if (!lo || !lo->idr_visible) 2178b9848081SChristoph Hellwig ret = -ENODEV; 21791c500ad7STetsuo Handa else 21801c500ad7STetsuo Handa lo->idr_visible = false; 21811c500ad7STetsuo Handa mutex_unlock(&loop_ctl_mutex); 21821c500ad7STetsuo Handa if (ret) 21831c500ad7STetsuo Handa return ret; 2184f9d10764SChristoph Hellwig 21851c500ad7STetsuo Handa /* Check whether this loop device can be removed. */ 21866cc8e743SPavel Tatashin ret = mutex_lock_killable(&lo->lo_mutex); 21876cc8e743SPavel Tatashin if (ret) 21881c500ad7STetsuo Handa goto mark_visible; 2189a0e286b6SChristoph Hellwig if (lo->lo_state != Lo_unbound || disk_openers(lo->lo_disk) > 0) { 21906cc8e743SPavel Tatashin mutex_unlock(&lo->lo_mutex); 2191770fe30aSKay Sievers ret = -EBUSY; 21921c500ad7STetsuo Handa goto mark_visible; 2193770fe30aSKay Sievers } 2194a0e286b6SChristoph Hellwig /* Mark this loop device as no more bound, but not quite unbound yet */ 2195990e7811SChristoph Hellwig lo->lo_state = Lo_deleting; 21966cc8e743SPavel Tatashin mutex_unlock(&lo->lo_mutex); 2197f9d10764SChristoph Hellwig 2198770fe30aSKay Sievers loop_remove(lo); 21991c500ad7STetsuo Handa return 0; 22001c500ad7STetsuo Handa 22011c500ad7STetsuo Handa mark_visible: 22021c500ad7STetsuo Handa /* Show this loop device again. */ 22031c500ad7STetsuo Handa mutex_lock(&loop_ctl_mutex); 22041c500ad7STetsuo Handa lo->idr_visible = true; 2205f9d10764SChristoph Hellwig mutex_unlock(&loop_ctl_mutex); 2206f9d10764SChristoph Hellwig return ret; 2207770fe30aSKay Sievers } 2208f9d10764SChristoph Hellwig 2209f9d10764SChristoph Hellwig static int loop_control_get_free(int idx) 2210f9d10764SChristoph Hellwig { 2211f9d10764SChristoph Hellwig struct loop_device *lo; 2212b9848081SChristoph Hellwig int id, ret; 2213f9d10764SChristoph Hellwig 2214f9d10764SChristoph Hellwig ret = mutex_lock_killable(&loop_ctl_mutex); 2215f9d10764SChristoph Hellwig if (ret) 2216f9d10764SChristoph Hellwig return ret; 2217b9848081SChristoph Hellwig idr_for_each_entry(&loop_index_idr, lo, id) { 22181c500ad7STetsuo Handa /* Hitting a race results in creating a new loop device which is harmless. */ 22191c500ad7STetsuo Handa if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound) 2220b9848081SChristoph Hellwig goto found; 2221b9848081SChristoph Hellwig } 22220a42e99bSJan Kara mutex_unlock(&loop_ctl_mutex); 222318d1f200SChristoph Hellwig return loop_add(-1); 2224b9848081SChristoph Hellwig found: 2225b9848081SChristoph Hellwig mutex_unlock(&loop_ctl_mutex); 2226b9848081SChristoph Hellwig return id; 2227770fe30aSKay Sievers } 2228770fe30aSKay Sievers 2229f9d10764SChristoph Hellwig static long loop_control_ioctl(struct file *file, unsigned int cmd, 2230f9d10764SChristoph Hellwig unsigned long parm) 2231f9d10764SChristoph Hellwig { 2232f9d10764SChristoph Hellwig switch (cmd) { 2233f9d10764SChristoph Hellwig case LOOP_CTL_ADD: 223418d1f200SChristoph Hellwig return loop_add(parm); 2235f9d10764SChristoph Hellwig case LOOP_CTL_REMOVE: 2236f9d10764SChristoph Hellwig return loop_control_remove(parm); 2237f9d10764SChristoph Hellwig case LOOP_CTL_GET_FREE: 2238f9d10764SChristoph Hellwig return loop_control_get_free(parm); 2239f9d10764SChristoph Hellwig default: 2240f9d10764SChristoph Hellwig return -ENOSYS; 2241f9d10764SChristoph Hellwig } 2242f9d10764SChristoph Hellwig } 2243f9d10764SChristoph Hellwig 2244770fe30aSKay Sievers static const struct file_operations loop_ctl_fops = { 2245770fe30aSKay Sievers .open = nonseekable_open, 2246770fe30aSKay Sievers .unlocked_ioctl = loop_control_ioctl, 2247770fe30aSKay Sievers .compat_ioctl = loop_control_ioctl, 2248770fe30aSKay Sievers .owner = THIS_MODULE, 2249770fe30aSKay Sievers .llseek = noop_llseek, 2250770fe30aSKay Sievers }; 2251770fe30aSKay Sievers 2252770fe30aSKay Sievers static struct miscdevice loop_misc = { 2253770fe30aSKay Sievers .minor = LOOP_CTRL_MINOR, 2254770fe30aSKay Sievers .name = "loop-control", 2255770fe30aSKay Sievers .fops = &loop_ctl_fops, 2256770fe30aSKay Sievers }; 2257770fe30aSKay Sievers 2258770fe30aSKay Sievers MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR); 2259770fe30aSKay Sievers MODULE_ALIAS("devname:loop-control"); 2260770fe30aSKay Sievers 226173285082SKen Chen static int __init loop_init(void) 226273285082SKen Chen { 226385c50197SIsaac J. Manjarres int i; 2264770fe30aSKay Sievers int err; 2265a47653fcSKen Chen 2266476a4813SLaurent Vivier part_shift = 0; 2267ac04fee0SNamhyung Kim if (max_part > 0) { 2268476a4813SLaurent Vivier part_shift = fls(max_part); 2269476a4813SLaurent Vivier 2270ac04fee0SNamhyung Kim /* 2271ac04fee0SNamhyung Kim * Adjust max_part according to part_shift as it is exported 2272ac04fee0SNamhyung Kim * to user space so that user can decide correct minor number 2273ac04fee0SNamhyung Kim * if [s]he want to create more devices. 2274ac04fee0SNamhyung Kim * 2275ac04fee0SNamhyung Kim * Note that -1 is required because partition 0 is reserved 2276ac04fee0SNamhyung Kim * for the whole disk. 2277ac04fee0SNamhyung Kim */ 2278ac04fee0SNamhyung Kim max_part = (1UL << part_shift) - 1; 2279ac04fee0SNamhyung Kim } 2280ac04fee0SNamhyung Kim 2281b1a66504SGuo Chao if ((1UL << part_shift) > DISK_MAX_PARTS) { 2282b1a66504SGuo Chao err = -EINVAL; 2283a8c1d064SAnton Volkov goto err_out; 2284b1a66504SGuo Chao } 228578f4bb36SNamhyung Kim 2286b1a66504SGuo Chao if (max_loop > 1UL << (MINORBITS - part_shift)) { 2287b1a66504SGuo Chao err = -EINVAL; 2288a8c1d064SAnton Volkov goto err_out; 2289b1a66504SGuo Chao } 229073285082SKen Chen 2291a8c1d064SAnton Volkov err = misc_register(&loop_misc); 2292a8c1d064SAnton Volkov if (err < 0) 2293a8c1d064SAnton Volkov goto err_out; 2294a8c1d064SAnton Volkov 2295a8c1d064SAnton Volkov 22968410d38cSChristoph Hellwig if (__register_blkdev(LOOP_MAJOR, "loop", loop_probe)) { 2297b1a66504SGuo Chao err = -EIO; 2298b1a66504SGuo Chao goto misc_out; 2299b1a66504SGuo Chao } 2300a47653fcSKen Chen 2301d134b00bSKay Sievers /* pre-create number of devices given by config or max_loop */ 230285c50197SIsaac J. Manjarres for (i = 0; i < max_loop; i++) 2303d6da83d0SChristoph Hellwig loop_add(i); 230434dd82afSKay Sievers 230573285082SKen Chen printk(KERN_INFO "loop: module loaded\n"); 23061da177e4SLinus Torvalds return 0; 2307b1a66504SGuo Chao 2308b1a66504SGuo Chao misc_out: 2309b1a66504SGuo Chao misc_deregister(&loop_misc); 2310a8c1d064SAnton Volkov err_out: 2311b1a66504SGuo Chao return err; 231234dd82afSKay Sievers } 2313a47653fcSKen Chen 231473285082SKen Chen static void __exit loop_exit(void) 23151da177e4SLinus Torvalds { 23168e60947dSChristoph Hellwig struct loop_device *lo; 23178e60947dSChristoph Hellwig int id; 23188e60947dSChristoph Hellwig 23198b52d8beSChristoph Hellwig unregister_blkdev(LOOP_MAJOR, "loop"); 23208b52d8beSChristoph Hellwig misc_deregister(&loop_misc); 2321200f9337SLuis Chamberlain 23221c500ad7STetsuo Handa /* 23231c500ad7STetsuo Handa * There is no need to use loop_ctl_mutex here, for nobody else can 23241c500ad7STetsuo Handa * access loop_index_idr when this module is unloading (unless forced 23251c500ad7STetsuo Handa * module unloading is requested). If this is not a clean unloading, 23261c500ad7STetsuo Handa * we have no means to avoid kernel crash. 23271c500ad7STetsuo Handa */ 23288e60947dSChristoph Hellwig idr_for_each_entry(&loop_index_idr, lo, id) 23298e60947dSChristoph Hellwig loop_remove(lo); 2330bd5c39edSChristoph Hellwig 2331bd5c39edSChristoph Hellwig idr_destroy(&loop_index_idr); 23321da177e4SLinus Torvalds } 23331da177e4SLinus Torvalds 23341da177e4SLinus Torvalds module_init(loop_init); 23351da177e4SLinus Torvalds module_exit(loop_exit); 23361da177e4SLinus Torvalds 23371da177e4SLinus Torvalds #ifndef MODULE 23381da177e4SLinus Torvalds static int __init max_loop_setup(char *str) 23391da177e4SLinus Torvalds { 23401da177e4SLinus Torvalds max_loop = simple_strtol(str, NULL, 0); 2341bb5faa99SMauricio Faria de Oliveira #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD 2342bb5faa99SMauricio Faria de Oliveira max_loop_specified = true; 2343bb5faa99SMauricio Faria de Oliveira #endif 23441da177e4SLinus Torvalds return 1; 23451da177e4SLinus Torvalds } 23461da177e4SLinus Torvalds 23471da177e4SLinus Torvalds __setup("max_loop=", max_loop_setup); 23481da177e4SLinus Torvalds #endif 2349