// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
#include "blk-ioprio.h"
struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

static DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
static struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;
/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
        set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
        clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);
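
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * that wants to disable merging on its queue could toggle the corresponding
 * flag with the helpers above:
 *
 *      blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *      ...
 *      blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
 */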

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
        REQ_OP_NAME(READ),
        REQ_OP_NAME(WRITE),
        REQ_OP_NAME(FLUSH),
        REQ_OP_NAME(DISCARD),
        REQ_OP_NAME(SECURE_ERASE),
        REQ_OP_NAME(ZONE_RESET),
        REQ_OP_NAME(ZONE_RESET_ALL),
        REQ_OP_NAME(ZONE_OPEN),
        REQ_OP_NAME(ZONE_CLOSE),
        REQ_OP_NAME(ZONE_FINISH),
        REQ_OP_NAME(ZONE_APPEND),
        REQ_OP_NAME(WRITE_ZEROES),
        REQ_OP_NAME(DRV_IN),
        REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return the string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer function to convert REQ_OP_XXX into
 * string format. Useful for debugging and tracing bios or requests. For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(enum req_op op)
{
        const char *op_str = "UNKNOWN";

        if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
                op_str = blk_op_name[op];

        return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);
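
/*
 * Example (illustrative sketch): blk_op_str() is handy when tracing a bio,
 * e.g. in a driver's debug output:
 *
 *      pr_debug("%s: %s at sector %llu\n", __func__,
 *               blk_op_str(bio_op(bio)),
 *               (unsigned long long)bio->bi_iter.bi_sector);
 */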

static const struct {
        int errno;
        const char *name;
} blk_errors[] = {
        [BLK_STS_OK]            = { 0, "" },
        [BLK_STS_NOTSUPP]       = { -EOPNOTSUPP, "operation not supported" },
        [BLK_STS_TIMEOUT]       = { -ETIMEDOUT, "timeout" },
        [BLK_STS_NOSPC]         = { -ENOSPC, "critical space allocation" },
        [BLK_STS_TRANSPORT]     = { -ENOLINK, "recoverable transport" },
        [BLK_STS_TARGET]        = { -EREMOTEIO, "critical target" },
        [BLK_STS_RESV_CONFLICT] = { -EBADE, "reservation conflict" },
        [BLK_STS_MEDIUM]        = { -ENODATA, "critical medium" },
        [BLK_STS_PROTECTION]    = { -EILSEQ, "protection" },
        [BLK_STS_RESOURCE]      = { -ENOMEM, "kernel resource" },
        [BLK_STS_DEV_RESOURCE]  = { -EBUSY, "device resource" },
        [BLK_STS_AGAIN]         = { -EAGAIN, "nonblocking retry" },
        [BLK_STS_OFFLINE]       = { -ENODEV, "device offline" },

        /* device mapper special case, should not leak out: */
        [BLK_STS_DM_REQUEUE]    = { -EREMCHG, "dm internal retry" },

        /* zone device specific errors */
        [BLK_STS_ZONE_OPEN_RESOURCE]    = { -ETOOMANYREFS, "open zones exceeded" },
        [BLK_STS_ZONE_ACTIVE_RESOURCE]  = { -EOVERFLOW, "active zones exceeded" },

        /* Command duration limit device-side timeout */
        [BLK_STS_DURATION_LIMIT]        = { -ETIME, "duration limit exceeded" },

        [BLK_STS_INVAL]         = { -EINVAL, "invalid" },

        /* everything else not covered above: */
        [BLK_STS_IOERR]         = { -EIO, "I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
                if (blk_errors[i].errno == errno)
                        return (__force blk_status_t)i;
        }

        return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
        int idx = (__force int)status;

        if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
                return -EIO;
        return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

const char *blk_status_to_str(blk_status_t status)
{
        int idx = (__force int)status;

        if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
                return "<null>";
        return blk_errors[idx].name;
}
EXPORT_SYMBOL_GPL(blk_status_to_str);
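
/*
 * Example (illustrative sketch): the conversion helpers above are inverses
 * for every errno listed in blk_errors[]; anything else collapses to the
 * BLK_STS_IOERR catch-all:
 *
 *      blk_status_t sts = errno_to_blk_status(-ENOMEM);  // BLK_STS_RESOURCE
 *      int err = blk_status_to_errno(sts);               // -ENOMEM again
 */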

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
        timer_delete_sync(&q->timeout);
        cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
        atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
        int pm_only;

        pm_only = atomic_dec_return(&q->pm_only);
        WARN_ON_ONCE(pm_only < 0);
        if (pm_only == 0)
                wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
        struct request_queue *q = container_of(rcu_head,
                        struct request_queue, rcu_head);

        percpu_ref_exit(&q->q_usage_counter);
        kmem_cache_free(blk_requestq_cachep, q);
}

static void blk_free_queue(struct request_queue *q)
{
        blk_free_queue_stats(q->stats);
        if (queue_is_mq(q))
                blk_mq_release(q);

        ida_free(&blk_queue_ida, q->id);
        lockdep_unregister_key(&q->io_lock_cls_key);
        lockdep_unregister_key(&q->q_lock_cls_key);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue and frees it when the refcount
 * reaches 0.
 */
void blk_put_queue(struct request_queue *q)
{
        if (refcount_dec_and_test(&q->refs))
                blk_free_queue(q);
}
EXPORT_SYMBOL(blk_put_queue);

bool blk_queue_start_drain(struct request_queue *q)
{
        /*
         * When the queue DYING flag is set, we need to block new requests
         * from entering the queue, so we call blk_freeze_queue_start() to
         * prevent I/O from crossing blk_queue_enter().
         */
        bool freeze = __blk_freeze_queue_start(q, current);

        if (queue_is_mq(q))
                blk_mq_wake_waiters(q);
        /* Make blk_queue_enter() reexamine the DYING flag. */
        wake_up_all(&q->mq_freeze_wq);

        return freeze;
}

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
        const bool pm = flags & BLK_MQ_REQ_PM;

        while (!blk_try_enter_queue(q, pm)) {
                if (flags & BLK_MQ_REQ_NOWAIT)
                        return -EAGAIN;

                /*
                 * Read side of the barrier pair in blk_freeze_queue_start():
                 * we need to order reading the __PERCPU_REF_DEAD flag of
                 * .q_usage_counter against reading .mq_freeze_depth or the
                 * queue dying flag, otherwise the following wait may never
                 * return if the two reads are reordered.
                 */
                smp_rmb();
                wait_event(q->mq_freeze_wq,
                           (!q->mq_freeze_depth &&
                            blk_pm_resume_queue(pm, q)) ||
                           blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
        }

        rwsem_acquire_read(&q->q_lockdep_map, 0, 0, _RET_IP_);
        rwsem_release(&q->q_lockdep_map, _RET_IP_);
        return 0;
}
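
/*
 * Example (illustrative sketch): code that touches a queue outside of bio
 * submission pairs blk_queue_enter() with blk_queue_exit() (defined below):
 *
 *      if (!blk_queue_enter(q, BLK_MQ_REQ_NOWAIT)) {
 *              ... use the queue without blocking a freeze ...
 *              blk_queue_exit(q);
 *      }
 */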

int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
        while (!blk_try_enter_queue(q, false)) {
                struct gendisk *disk = bio->bi_bdev->bd_disk;

                if (bio->bi_opf & REQ_NOWAIT) {
                        if (test_bit(GD_DEAD, &disk->state))
                                goto dead;
                        bio_wouldblock_error(bio);
                        return -EAGAIN;
                }

                /*
                 * Read side of the barrier pair in blk_freeze_queue_start():
                 * we need to order reading the __PERCPU_REF_DEAD flag of
                 * .q_usage_counter against reading .mq_freeze_depth or the
                 * queue dying flag, otherwise the following wait may never
                 * return if the two reads are reordered.
                 */
                smp_rmb();
                wait_event(q->mq_freeze_wq,
                           (!q->mq_freeze_depth &&
                            blk_pm_resume_queue(false, q)) ||
                           test_bit(GD_DEAD, &disk->state));
                if (test_bit(GD_DEAD, &disk->state))
                        goto dead;
        }

        rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
        rwsem_release(&q->io_lockdep_map, _RET_IP_);
        return 0;
dead:
        bio_io_error(bio);
        return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
        percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
        struct request_queue *q =
                container_of(ref, struct request_queue, q_usage_counter);

        wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
        struct request_queue *q = timer_container_of(q, t, timeout);

        kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
{
        struct request_queue *q;
        int error;

        q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
                                  node_id);
        if (!q)
                return ERR_PTR(-ENOMEM);

        q->last_merge = NULL;

        q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
        if (q->id < 0) {
                error = q->id;
                goto fail_q;
        }

        q->stats = blk_alloc_queue_stats();
        if (!q->stats) {
                error = -ENOMEM;
                goto fail_id;
        }

        error = blk_set_default_limits(lim);
        if (error)
                goto fail_stats;
        q->limits = *lim;

        q->node = node_id;

        atomic_set(&q->nr_active_requests_shared_tags, 0);

        timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
        INIT_WORK(&q->timeout_work, blk_timeout_work);
        INIT_LIST_HEAD(&q->icq_list);

        refcount_set(&q->refs, 1);
        mutex_init(&q->debugfs_mutex);
        mutex_init(&q->elevator_lock);
        mutex_init(&q->sysfs_lock);
        mutex_init(&q->limits_lock);
        mutex_init(&q->rq_qos_mutex);
        spin_lock_init(&q->queue_lock);

        init_waitqueue_head(&q->mq_freeze_wq);
        mutex_init(&q->mq_freeze_lock);

        blkg_init_queue(q);

        /*
         * Init percpu_ref in atomic mode so that it's faster to shutdown.
         * See blk_register_queue() for details.
         */
        error = percpu_ref_init(&q->q_usage_counter,
                                blk_queue_usage_counter_release,
                                PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
        if (error)
                goto fail_stats;
        lockdep_register_key(&q->io_lock_cls_key);
        lockdep_register_key(&q->q_lock_cls_key);
        lockdep_init_map(&q->io_lockdep_map, "&q->q_usage_counter(io)",
                         &q->io_lock_cls_key, 0);
        lockdep_init_map(&q->q_lockdep_map, "&q->q_usage_counter(queue)",
                         &q->q_lock_cls_key, 0);

        /* Teach lockdep about lock ordering (reclaim WRT queue freeze lock). */
        fs_reclaim_acquire(GFP_KERNEL);
        rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
        rwsem_release(&q->io_lockdep_map, _RET_IP_);
        fs_reclaim_release(GFP_KERNEL);

        q->nr_requests = BLKDEV_DEFAULT_RQ;

        return q;

fail_stats:
        blk_free_queue_stats(q->stats);
fail_id:
        ida_free(&blk_queue_ida, q->id);
fail_q:
        kmem_cache_free(blk_requestq_cachep, q);
        return ERR_PTR(error);
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
        if (unlikely(blk_queue_dying(q)))
                return false;
        refcount_inc(&q->refs);
        return true;
}
EXPORT_SYMBOL(blk_get_queue);

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
        return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
        return bdev_test_flag(part, BD_MAKE_IT_FAIL) &&
                should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
        struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
                                                       NULL, &fail_make_request);

        return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */
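
/*
 * Example (illustrative sketch): with CONFIG_FAIL_MAKE_REQUEST enabled,
 * failures can be injected from user space, e.g. by booting with
 * fail_make_request=<interval>,<probability>,<space>,<times> and then
 * marking a device as eligible:
 *
 *      echo 1 > /sys/block/sdX/make-it-fail
 *
 * See Documentation/fault-injection/fault-injection.rst for the knobs.
 */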

static inline void bio_check_ro(struct bio *bio)
{
        if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
                if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
                        return;

                if (bdev_test_flag(bio->bi_bdev, BD_RO_WARNED))
                        return;

                bdev_set_flag(bio->bi_bdev, BD_RO_WARNED);

                /*
                 * Using an ioctl to set the underlying disk of a raid/dm
                 * device to read-only will trigger this.
                 */
                pr_warn("Trying to write to read-only block-device %pg\n",
                        bio->bi_bdev);
        }
}

int should_fail_bio(struct bio *bio)
{
        if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
                return -EIO;
        return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
        sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
        unsigned int nr_sectors = bio_sectors(bio);

        if (nr_sectors &&
            (nr_sectors > maxsector ||
             bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
                if (!maxsector)
                        return -EIO;
                pr_info_ratelimited("%s: attempt to access beyond end of device\n"
                                    "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
                                    current->comm, bio->bi_bdev, bio->bi_opf,
                                    bio->bi_iter.bi_sector, nr_sectors, maxsector);
                return -EIO;
        }
        return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
        struct block_device *p = bio->bi_bdev;

        if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
                return -EIO;
        if (bio_sectors(bio)) {
                bio->bi_iter.bi_sector += p->bd_start_sect;
                trace_block_bio_remap(bio, p->bd_dev,
                                      bio->bi_iter.bi_sector -
                                      p->bd_start_sect);
        }
        bio_set_flag(bio, BIO_REMAPPED);
        return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
                                                 struct bio *bio)
{
        int nr_sectors = bio_sectors(bio);

        /* Only applicable to zoned block devices */
        if (!bdev_is_zoned(bio->bi_bdev))
                return BLK_STS_NOTSUPP;

        /* The bio sector must point to the start of a sequential zone */
        if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector))
                return BLK_STS_IOERR;

        /*
         * Not allowed to cross zone boundaries. Otherwise, the BIO will be
         * split and could result in non-contiguous sectors being written in
         * different zones.
         */
        if (nr_sectors > q->limits.chunk_sectors)
                return BLK_STS_IOERR;

        /* Make sure the BIO is small enough and will not get split */
        if (nr_sectors > q->limits.max_zone_append_sectors)
                return BLK_STS_IOERR;

        bio->bi_opf |= REQ_NOMERGE;

        return BLK_STS_OK;
}

static void __submit_bio(struct bio *bio)
{
        /* If plug is not used, add new plug here to cache nsecs time. */
        struct blk_plug plug;

        if (unlikely(!blk_crypto_bio_prep(&bio)))
                return;

        blk_start_plug(&plug);

        if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
                blk_mq_submit_bio(bio);
        } else if (likely(bio_queue_enter(bio) == 0)) {
                struct gendisk *disk = bio->bi_bdev->bd_disk;

                if ((bio->bi_opf & REQ_POLLED) &&
                    !(disk->queue->limits.features & BLK_FEAT_POLL)) {
                        bio->bi_status = BLK_STS_NOTSUPP;
                        bio_endio(bio);
                } else {
                        disk->fops->submit_bio(bio);
                }
                blk_queue_exit(disk->queue);
        }

        blk_finish_plug(&plug);
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added. ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct. If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 * ->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
        struct bio_list bio_list_on_stack[2];

        BUG_ON(bio->bi_next);

        bio_list_init(&bio_list_on_stack[0]);
        current->bio_list = bio_list_on_stack;

        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
                struct bio_list lower, same;

                /*
                 * Create a fresh bio_list for all subordinate requests.
                 */
                bio_list_on_stack[1] = bio_list_on_stack[0];
                bio_list_init(&bio_list_on_stack[0]);

                __submit_bio(bio);

                /*
                 * Sort new bios into those for a lower level and those for the
                 * same level.
                 */
                bio_list_init(&lower);
                bio_list_init(&same);
                while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
                        if (q == bdev_get_queue(bio->bi_bdev))
                                bio_list_add(&same, bio);
                        else
                                bio_list_add(&lower, bio);

                /*
                 * Now assemble so we handle the lowest level first.
                 */
                bio_list_merge(&bio_list_on_stack[0], &lower);
                bio_list_merge(&bio_list_on_stack[0], &same);
                bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
        } while ((bio = bio_list_pop(&bio_list_on_stack[0])));

        current->bio_list = NULL;
}

static void __submit_bio_noacct_mq(struct bio *bio)
{
        struct bio_list bio_list[2] = { };

        current->bio_list = bio_list;

        do {
                __submit_bio(bio);
        } while ((bio = bio_list_pop(&bio_list[0])));

        current->bio_list = NULL;
}

void submit_bio_noacct_nocheck(struct bio *bio, bool split)
{
        blk_cgroup_bio_start(bio);

        if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
                trace_block_bio_queue(bio);
                /*
                 * Now that enqueuing has been traced, we need to trace
                 * completion as well.
                 */
                bio_set_flag(bio, BIO_TRACE_COMPLETION);
        }

        /*
         * We only want one ->submit_bio to be active at a time, else stack
         * usage with stacked devices could be a problem. Use current->bio_list
         * to collect a list of requests submitted by a ->submit_bio method
         * while it is active, and then process them after it returned.
         */
        if (current->bio_list) {
                if (split)
                        bio_list_add_head(&current->bio_list[0], bio);
                else
                        bio_list_add(&current->bio_list[0], bio);
        } else if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
                __submit_bio_noacct_mq(bio);
        } else {
                __submit_bio_noacct(bio);
        }
}

static blk_status_t blk_validate_atomic_write_op_size(struct request_queue *q,
                                                      struct bio *bio)
{
        if (bio->bi_iter.bi_size > queue_atomic_write_unit_max_bytes(q))
                return BLK_STS_INVAL;

        if (bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q))
                return BLK_STS_INVAL;

        return BLK_STS_OK;
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers. All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
        struct block_device *bdev = bio->bi_bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        blk_status_t status = BLK_STS_IOERR;

        might_sleep();

        /*
         * For a REQ_NOWAIT based request, return -EOPNOTSUPP
         * if the queue does not support NOWAIT.
         */
        if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
                goto not_supported;

        if (should_fail_bio(bio))
                goto end_io;
        bio_check_ro(bio);
        if (!bio_flagged(bio, BIO_REMAPPED)) {
                if (unlikely(bio_check_eod(bio)))
                        goto end_io;
                if (bdev_is_partition(bdev) &&
                    unlikely(blk_partition_remap(bio)))
                        goto end_io;
        }

        /*
         * Filter flush bios early so that bio based drivers without flush
         * support don't have to worry about them.
         */
        if (op_is_flush(bio->bi_opf)) {
                if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
                                 bio_op(bio) != REQ_OP_ZONE_APPEND))
                        goto end_io;
                if (!bdev_write_cache(bdev)) {
                        bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
                        if (!bio_sectors(bio)) {
                                status = BLK_STS_OK;
                                goto end_io;
                        }
                }
        }

        switch (bio_op(bio)) {
        case REQ_OP_READ:
                break;
        case REQ_OP_WRITE:
                if (bio->bi_opf & REQ_ATOMIC) {
                        status = blk_validate_atomic_write_op_size(q, bio);
                        if (status != BLK_STS_OK)
                                goto end_io;
                }
                break;
        case REQ_OP_FLUSH:
                /*
                 * REQ_OP_FLUSH can't be submitted through bios, it is only
                 * synthesized in struct request by the flush state machine.
                 */
                goto not_supported;
        case REQ_OP_DISCARD:
                if (!bdev_max_discard_sectors(bdev))
                        goto not_supported;
                break;
        case REQ_OP_SECURE_ERASE:
                if (!bdev_max_secure_erase_sectors(bdev))
                        goto not_supported;
                break;
        case REQ_OP_ZONE_APPEND:
                status = blk_check_zone_append(q, bio);
                if (status != BLK_STS_OK)
                        goto end_io;
                break;
        case REQ_OP_WRITE_ZEROES:
                if (!q->limits.max_write_zeroes_sectors)
                        goto not_supported;
                break;
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
        case REQ_OP_ZONE_RESET_ALL:
                if (!bdev_is_zoned(bio->bi_bdev))
                        goto not_supported;
                break;
        case REQ_OP_DRV_IN:
        case REQ_OP_DRV_OUT:
                /*
                 * Driver private operations are only used with passthrough
                 * requests.
                 */
                fallthrough;
        default:
                goto not_supported;
        }

        if (blk_throtl_bio(bio))
                return;
        submit_bio_noacct_nocheck(bio, false);
        return;

not_supported:
        status = BLK_STS_NOTSUPP;
end_io:
        bio->bi_status = status;
        bio_endio(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

static void bio_set_ioprio(struct bio *bio)
{
        /* Nobody set ioprio so far? Initialize it based on task's nice value */
        if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
                bio->bi_ioprio = get_current_ioprio();
        blkcg_set_ioprio(bio);
}

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices. It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done. The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
        if (bio_op(bio) == REQ_OP_READ) {
                task_io_account_read(bio->bi_iter.bi_size);
                count_vm_events(PGPGIN, bio_sectors(bio));
        } else if (bio_op(bio) == REQ_OP_WRITE) {
                count_vm_events(PGPGOUT, bio_sectors(bio));
        }

        bio_set_ioprio(bio);
        submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
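
/*
 * Example (illustrative sketch, error handling omitted; my_end_io and
 * my_cookie are hypothetical): a typical caller allocates a bio against a
 * block device, fills in the target sector and payload, sets a completion
 * callback and submits:
 *
 *      struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *
 *      bio->bi_iter.bi_sector = sector;
 *      __bio_add_page(bio, page, PAGE_SIZE, 0);
 *      bio->bi_end_io = my_end_io;
 *      bio->bi_private = my_cookie;
 *      submit_bio(bio);
 */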

/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on the queue associated with the bio. Returns number
 * of completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in an RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
        blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
        struct block_device *bdev;
        struct request_queue *q;
        int ret = 0;

        bdev = READ_ONCE(bio->bi_bdev);
        if (!bdev)
                return 0;

        q = bdev_get_queue(bdev);
        if (cookie == BLK_QC_T_NONE)
                return 0;

        blk_flush_plug(current->plug, false);

        /*
         * We need to be able to enter a frozen queue, similar to how
         * timeouts also need to do that. If that is blocked, then we can
         * have pending IO when a queue freeze is started, and then the
         * wait for the freeze to finish will wait for polled requests to
         * timeout as the poller is prevented from entering the queue and
         * completing them. As long as we prevent new IO from being queued,
         * that should be all that matters.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return 0;
        if (queue_is_mq(q)) {
                ret = blk_mq_poll(q, cookie, iob, flags);
        } else {
                struct gendisk *disk = q->disk;

                if ((q->limits.features & BLK_FEAT_POLL) && disk &&
                    disk->fops->poll_bio)
                        ret = disk->fops->poll_bio(bio, iob, flags);
        }
        blk_queue_exit(q);
        return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);
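
/*
 * Example (illustrative sketch): a synchronous caller that submitted a
 * REQ_POLLED bio can spin for its completion, where "done" is a
 * hypothetical flag set by the bio's ->bi_end_io callback:
 *
 *      while (!READ_ONCE(done))
 *              bio_poll(bio, NULL, 0);
 */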

/*
 * Helper to implement file_operations.iopoll. Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
                    unsigned int flags)
{
        struct bio *bio;
        int ret = 0;

        /*
         * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
         * point to a freshly allocated bio at this point. If that happens
         * we have a few cases to consider:
         *
         * 1) the bio is being initialized and bi_bdev is NULL. We can simply
         *    do nothing in this case
         * 2) the bio points to a device that is not poll enabled. bio_poll
         *    will catch this and return 0
         * 3) the bio points to a poll capable device, including but not
         *    limited to the one that the original bio pointed to. In this
         *    case we will call into the actual poll method and poll for I/O,
         *    even if we don't need to, but it won't cause harm either.
         *
         * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
         * is still allocated. Because partitions hold a reference to the whole
         * device bdev and thus disk, the disk is also still valid. Grabbing
         * a reference to the queue in bio_poll() ensures the hctxs and requests
         * are still valid as well.
         */
        rcu_read_lock();
        bio = READ_ONCE(kiocb->private);
        if (bio)
                ret = bio_poll(bio, iob, flags);
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);

void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
        unsigned long stamp;
again:
        stamp = READ_ONCE(part->bd_stamp);
        if (unlikely(time_after(now, stamp)) &&
            likely(try_cmpxchg(&part->bd_stamp, &stamp, now)) &&
            (end || bdev_count_inflight(part)))
                __part_stat_add(part, io_ticks, now - stamp);

        if (bdev_is_partition(part)) {
                part = bdev_whole(part);
                goto again;
        }
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
                                 unsigned long start_time)
{
        part_stat_lock();
        update_io_ticks(bdev, start_time, false);
        part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
        part_stat_unlock();

        return start_time;
}
EXPORT_SYMBOL(bdev_start_io_acct);

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio: bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
        return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
                      unsigned int sectors, unsigned long start_time)
{
        const int sgrp = op_stat_group(op);
        unsigned long now = READ_ONCE(jiffies);
        unsigned long duration = now - start_time;

        part_stat_lock();
        update_io_ticks(bdev, now, true);
        part_stat_inc(bdev, ios[sgrp]);
        part_stat_add(bdev, sectors[sgrp], sectors);
        part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
        part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
        part_stat_unlock();
}
EXPORT_SYMBOL(bdev_end_io_acct);

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
                              struct block_device *orig_bdev)
{
        bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
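
/*
 * Example (illustrative sketch): a bio based driver brackets each bio it
 * handles with the accounting helpers above; bio_end_io_acct() is the
 * wrapper from <linux/blkdev.h> that ends up in bio_end_io_acct_remapped():
 *
 *      unsigned long start = bio_start_io_acct(bio);
 *
 *      ... drive the I/O to completion ...
 *      bio_end_io_acct(bio, start);
 */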

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy. This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
        if (queue_is_mq(q) && q->mq_ops->busy)
                return q->mq_ops->busy(q);

        return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct work_struct *work)
{
        return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
                                unsigned long delay)
{
        return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
        struct task_struct *tsk = current;

        /*
         * If this is a nested plug, don't actually assign it.
         */
        if (tsk->plug)
                return;

        plug->cur_ktime = 0;
        rq_list_init(&plug->mq_list);
        rq_list_init(&plug->cached_rqs);
        plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
        plug->rq_count = 0;
        plug->multiple_queues = false;
        plug->has_elevator = false;
        INIT_LIST_HEAD(&plug->cb_list);

        /*
         * Store ordering should not be needed here, since a potential
         * preempt will imply a full memory barrier
         */
        tsk->plug = plug;
}

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug: The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch. The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called. However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 * Tracking blk_plug inside the task_struct will help with auto-flushing the
 * pending I/O should the task end up blocking between blk_start_plug() and
 * blk_finish_plug(). This is important from a performance perspective, but
 * also ensures that we don't deadlock. For instance, if the task is blocking
 * for a memory allocation, memory reclaim could end up wanting to free a
 * page belonging to that request that is currently residing in our private
 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
 * this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
        blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);
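
/*
 * Example (illustrative sketch): batching several submissions under one
 * plug, assuming a caller-prepared bios[] array:
 *
 *      struct blk_plug plug;
 *      int i;
 *
 *      blk_start_plug(&plug);
 *      for (i = 0; i < nr_bios; i++)
 *              submit_bio(bios[i]);
 *      blk_finish_plug(&plug);
 */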

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
        LIST_HEAD(callbacks);

        while (!list_empty(&plug->cb_list)) {
                list_splice_init(&plug->cb_list, &callbacks);

                while (!list_empty(&callbacks)) {
                        struct blk_plug_cb *cb = list_first_entry(&callbacks,
                                                                  struct blk_plug_cb,
                                                                  list);
                        list_del(&cb->list);
                        cb->callback(cb, from_schedule);
                }
        }
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
                                      int size)
{
        struct blk_plug *plug = current->plug;
        struct blk_plug_cb *cb;

        if (!plug)
                return NULL;

        list_for_each_entry(cb, &plug->cb_list, list)
                if (cb->callback == unplug && cb->data == data)
                        return cb;

        /* Not currently on the callback list */
        BUG_ON(size < sizeof(*cb));
        cb = kzalloc(size, GFP_ATOMIC);
        if (cb) {
                cb->data = data;
                cb->callback = unplug;
                list_add(&cb->list, &plug->cb_list);
        }
        return cb;
}
EXPORT_SYMBOL(blk_check_plugged);
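
/*
 * Example (illustrative sketch): stacking drivers such as md use
 * blk_check_plugged() to attach a private callback to the current plug;
 * my_unplug is a hypothetical blk_plug_cb_fn:
 *
 *      struct blk_plug_cb *cb;
 *
 *      cb = blk_check_plugged(my_unplug, my_data, sizeof(*cb));
 *      if (!cb)
 *              ... no plug active, dispatch directly ...
 */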

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
        if (!list_empty(&plug->cb_list))
                flush_plug_callbacks(plug, from_schedule);
        blk_mq_flush_plug_list(plug, from_schedule);
        /*
         * Unconditionally flush out cached requests, even if the unplug
         * event came from schedule. Since we know we hold references to the
         * queue for cached requests, we don't want a blocked task holding
         * up a queue freeze/quiesce event.
         */
        if (unlikely(!rq_list_empty(&plug->cached_rqs)))
                blk_mq_free_plug_rqs(plug);

        plug->cur_ktime = 0;
        current->flags &= ~PF_BLOCK_TS;
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug: The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete. This function
 * must be paired with an initial call to blk_start_plug(). The intent
 * is to allow the block layer to optimize I/O submission. See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
        if (plug == current->plug) {
                __blk_flush_plug(plug, false);
                current->plug = NULL;
        }
}
EXPORT_SYMBOL(blk_finish_plug);

void blk_io_schedule(void)
{
        /* Prevent hang_check timer from firing at us during very long I/O */
        unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

        if (timeout)
                io_schedule_timeout(timeout);
        else
                io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
        BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
        BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
                     sizeof_field(struct request, cmd_flags));
        BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
                     sizeof_field(struct bio, bi_opf));

        /* used for unplugging and affects IO latency/throughput - HIGHPRI */
        kblockd_workqueue = alloc_workqueue("kblockd",
                                            WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!kblockd_workqueue)
                panic("Failed to create kblockd\n");

        blk_requestq_cachep = KMEM_CACHE(request_queue, SLAB_PANIC);

        blk_debugfs_root = debugfs_create_dir("block", NULL);

        return 0;
}