/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elv_change_ctx;

/*
 * Default upper limit for the software max_sectors limit used for regular I/Os.
 * This can be increased through sysfs.
 *
 * This should not be confused with the max_hw_sectors limit that is entirely
 * controlled by the block device driver, usually based on hardware limits.
 */
#define BLK_DEF_MAX_SECTORS_CAP	(SZ_4M >> SECTOR_SHIFT)
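/*
 * In practice the soft limit above is the one adjusted through the
 * max_sectors_kb attribute under /sys/block/<dev>/queue (a note about the
 * sysfs interface, not something defined in this header), while
 * max_hw_sectors_kb stays read-only and reflects the driver limit.
 */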

#define BLK_DEV_MAX_SECTORS	(LLONG_MAX >> 9)
#define BLK_MIN_SEGMENT_SIZE	4096

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern const struct kobj_type blk_queue_ktype;
extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
	struct rcu_head		rcu_head;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
bool blk_queue_start_drain(struct request_queue *q);
bool __blk_freeze_queue_start(struct request_queue *q,
			      struct task_struct *owner);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio, bool split);
void bio_await_chain(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false)) {
		rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
		return 0;
	}
	return __bio_queue_enter(q, bio);
}
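/*
 * Usage sketch (illustrative, not a contract defined in this header): a
 * successful bio_queue_enter() is balanced with blk_queue_exit() once the
 * caller is done issuing the bio, e.g. in the bio-based submission path
 * (the blk-mq path instead drops the reference when the request completes).
 * A nonzero return means the queue could not be entered and the bio has
 * already been ended, so it must not be submitted:
 *
 *	if (bio_queue_enter(bio))
 *		return;
 *	disk->fops->submit_bio(bio);
 *	blk_queue_exit(bdev_get_queue(bio->bi_bdev));
 */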

static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}

struct block_device *blkdev_get_no_open(dev_t dev, bool autoload);
void blkdev_put_no_open(struct block_device *bdev);

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(vec1);
	phys_addr_t addr2 = bvec_phys(vec2);

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
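/*
 * Worked example for the boundary check above (illustrative numbers): with
 * queue_segment_boundary() == 0xffff (a 64K boundary) and two physically
 * contiguous 2K vectors at addr1 = 0xfc00 and addr2 = 0x10400, we get
 * (addr1 | mask) == 0xffff but ((addr2 + 0x800 - 1) | mask) == 0x1ffff, so
 * the merge is rejected: the combined segment would straddle the boundary.
 */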

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the request is handled like a normal read/write request, so
 *     the ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;
	enum req_op op = req_op(rq);

	if (unlikely(op == REQ_OP_DISCARD))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_secure_erase_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	if (rq->cmd_flags & REQ_ATOMIC)
		return q->limits.atomic_write_max_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
void bio_integrity_free(struct bio *bio);

/*
 * Integrity payloads can either be owned by the submitter, in which case
 * bio_uninit will free them, or owned and generated by the block layer,
 * in which case we'll verify them here (for reads) and free them before
 * the bio is handed back to the submitter.
 */
bool __bio_integrity_endio(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)
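/*
 * Roughly speaking (see the plug handling code for the authoritative rules):
 * a plug is flushed once it has gathered BLK_MAX_REQUEST_COUNT requests, or
 * earlier once an individual request reaches BLK_PLUG_FLUSH_SIZE bytes, so
 * plugging bounds both the batched queue depth and per-request latency.
 */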

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

void elv_update_nr_hw_queues(struct request_queue *q,
		struct elv_change_ctx *ctx);
void elevator_set_default(struct request_queue *q);
void elevator_set_none(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);

struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs);

/*
 * All drivers must accept single-segment bios that are smaller than PAGE_SIZE.
 *
 * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
 * always valid if a bio has data. The check might lead to occasional false
 * positives when bios are cloned, but compared to the performance impact of
 * cloned bios themselves the check below doesn't matter anyway.
 */
static inline bool bio_may_need_split(struct bio *bio,
		const struct queue_limits *lim)
{
	const struct bio_vec *bv;

	if (lim->chunk_sectors)
		return true;

	if (!bio->bi_io_vec)
		return true;

	bv = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	if (bio->bi_iter.bi_size > bv->bv_len - bio->bi_iter.bi_bvec_done)
		return true;
	return bv->bv_len + bv->bv_offset > lim->min_segment_size;
}

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio: bio to be split
 * @lim: queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it. @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* other operations can't be split */
		*nr_segs = 0;
		return bio;
	}
}

/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim: Request queue limits.
 * @paddr: address of the range to add
 * @len: maximum length available to add at @paddr
 *
 * Returns the maximum number of bytes of the range starting at @paddr that can
 * be added to a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		phys_addr_t paddr, unsigned int len)
{
	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min_t(unsigned long, len,
		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
		    (unsigned long)lim->max_segment_size - 1) + 1);
}
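/*
 * Worked example (illustrative numbers): with max_segment_size = 4K,
 * seg_boundary_mask = ULONG_MAX, paddr = 0x1000 and len = 64K, the boundary
 * term is huge and the size term is 4K - 1 = 4095, so the result is
 * min(64K, 4095 + 1) = 4K. Adding the 1 only after taking the minimum is
 * what keeps "distance to boundary + 1" from wrapping to 0 when the mask is
 * ULONG_MAX and (paddr & mask) == 0.
 */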

int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim);
int blk_dev_init(void);

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
static inline bool blk_req_bio_is_zone_append(struct request *rq,
		struct bio *bio)
{
	return req_op(rq) == REQ_OP_ZONE_APPEND ||
	       bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
void blk_zone_append_update_request_bio(struct request *rq, struct bio *bio);
void blk_zone_mgmt_bio_endio(struct bio *bio);
void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * Zone management BIOs may impact zone write plugs (e.g. a zone reset
	 * changes a zone write plug zone write pointer offset), but these
	 * operations do not go through zone write plugging as they may operate
	 * on zones that do not have a zone write plug.
	 * blk_zone_mgmt_bio_endio() handles the potential changes to any zone
	 * write plugs that are present.
	 */
	if (op_is_zone_mgmt(bio_op(bio))) {
		blk_zone_mgmt_bio_endio(bio);
		return;
	}

	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline bool blk_req_bio_is_zone_append(struct request *req,
		struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_append_update_request_bio(struct request *rq,
		struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
void bdev_unhash(struct block_device *bdev);
void bdev_drop(struct block_device *bdev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
#define ADDPART_FLAG_READONLY	4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);
struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

int should_fail_bio(struct bio *bio);
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
		unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
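/*
 * The arithmetic trick above: after the unsigned cast, "count + 127u <= 127u"
 * is true exactly when the count is 0 or within 127 of wrapping (which is how
 * small negative values read back as huge unsigned numbers), so one cheap
 * comparison catches both underflow and imminent overflow.
 */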

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}
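/*
 * Note (behaviour summarized from the plug time-caching design, not defined
 * in this header): the cached plug timestamp is only meant to live for the
 * current plugging window. It is dropped again when the plug is flushed or
 * the task blocks, at which point the next blk_time_get_ns() call simply
 * takes a fresh ktime_get_ns() reading.
 */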

void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

void blk_integrity_generate(struct bio *bio);
void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);

#ifdef CONFIG_LOCKDEP
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
	if (!q->mq_freeze_disk_dead)
		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
	if (!q->mq_freeze_queue_dying)
		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}

static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
	if (!q->mq_freeze_queue_dying)
		rwsem_release(&q->q_lockdep_map, _RET_IP_);
	if (!q->mq_freeze_disk_dead)
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
}
#else
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
}
static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
}
#endif

/*
 * debugfs directory and file creation can trigger fs reclaim, which can enter
 * back into the block layer request_queue. This can cause deadlock if the
 * queue is frozen. Use NOIO context together with debugfs_mutex to prevent fs
 * reclaim from triggering block I/O.
 */
static inline void blk_debugfs_lock_nomemsave(struct request_queue *q)
{
	mutex_lock(&q->debugfs_mutex);
}

static inline void blk_debugfs_unlock_nomemrestore(struct request_queue *q)
{
	mutex_unlock(&q->debugfs_mutex);
}

static inline unsigned int __must_check blk_debugfs_lock(struct request_queue *q)
{
	unsigned int memflags = memalloc_noio_save();

	blk_debugfs_lock_nomemsave(q);
	return memflags;
}

static inline void blk_debugfs_unlock(struct request_queue *q,
				      unsigned int memflags)
{
	blk_debugfs_unlock_nomemrestore(q);
	memalloc_noio_restore(memflags);
}
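/*
 * Usage sketch (illustrative): callers creating or removing debugfs entries
 * for the queue wrap the work like this, so fs reclaim cannot recurse into
 * block I/O while debugfs_mutex is held:
 *
 *	unsigned int memflags = blk_debugfs_lock(q);
 *
 *	...create or remove debugfs entries...
 *	blk_debugfs_unlock(q, memflags);
 */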

#endif /* BLK_INTERNAL_H */