/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2. See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

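/*
 * Per-device state for an open bsg node.  A bsg_device is looked up by
 * request_queue and shared between opens via ref_count.  queued_cmds
 * counts commands accepted by write() and not yet freed; done_cmds counts
 * completed commands waiting to be reaped by read().  max_queue (default
 * BSG_DEFAULT_CMDS, adjustable with SG_SET_COMMAND_Q) bounds queued_cmds.
 * wq_done is woken on command completion and wq_free when a queue slot is
 * released; both are also used by poll().
 */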
struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[20];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK	= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}

static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd,
				fmode_t has_write_perm)
{
	if (hdr->request_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
	    u8 *sense)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void __user *dxferp = NULL;
	struct bsg_class_device *bcd = &q->bsg_dev;

	/* if the LLD has been removed then the bsg_unregister_queue will
	 * eventually be called and the class_dev was freed, so we can no
	 * longer use this request_queue. Return no such address.
	 */
	if (!bcd->class_dev)
		return ERR_PTR(-ENXIO);

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
				      hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
				      GFP_KERNEL);
		if (ret)
			goto out;
	}

	rq->sense = sense;
	rq->sense_len = 0;

	return rq;
out:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = rq->errors & 0xff;
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					rq->sense_len);

		ret = copy_to_user((void __user *)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->resid_len;
		hdr->din_resid = rq->next_rq->resid_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->resid_len;
	else
		hdr->dout_resid = rq->resid_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);

	return ret;
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	return bytes_read;
}

static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written,
		       fmode_t has_write_perm)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

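/*
 * write() submits commands: the user buffer is treated as an array of
 * struct sg_io_v4, and each element is mapped to a request and queued
 * asynchronously via bsg_add_command().  Completions are reaped with
 * read(), which returns the updated sg_io_v4 headers.
 */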
static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written,
			  file->f_mode & FMODE_WRITE);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || err_block_err(ret))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	if (!blk_get_queue(rq))
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;
		u8 sense[SCSI_SENSE_BUFFERSIZE];

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
	.llseek		=	default_llseek,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	if (q->kobj.sd)
		sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = dev_name(parent);

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			printk(KERN_ERR "bsg: too many bsg devices\n");
			ret = -EINVAL;
		}
		goto unlock;
	}

	bcd->minor = ret;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
	idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);
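/*
 * Illustrative userspace sketch (not part of this file): issuing a
 * synchronous TEST UNIT READY through the SG_IO ioctl path implemented
 * above.  The device path and CDB are example values only.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>		// SG_IO
 *	#include <linux/bsg.h>		// struct sg_io_v4
 *
 *	int bsg_test_unit_ready(const char *path)  // e.g. "/dev/bsg/0:0:0:0"
 *	{
 *		unsigned char cdb[6] = { 0 };	// TEST UNIT READY
 *		unsigned char sense[32];
 *		struct sg_io_v4 hdr;
 *		int fd = open(path, O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&hdr, 0, sizeof(hdr));
 *		hdr.guard = 'Q';		// checked by bsg_validate_sgv4_hdr()
 *		hdr.protocol = BSG_PROTOCOL_SCSI;
 *		hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *		hdr.request = (__u64)(unsigned long)cdb;
 *		hdr.request_len = sizeof(cdb);
 *		hdr.response = (__u64)(unsigned long)sense;
 *		hdr.max_response_len = sizeof(sense);
 *		hdr.timeout = 5000;		// ms, see blk_fill_sgv4_hdr_rq()
 *
 *		if (ioctl(fd, SG_IO, &hdr) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return hdr.device_status;	// SCSI status byte, 0 on success
 *	}
 */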