/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>
#include <linux/lockdep.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_report_zones_args;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern const struct device_type part_type;
extern const struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS 6

#define DISK_MAX_PARTS 256
#define DISK_NAME_LEN 32

#define PARTITION_META_INFO_VOLNAMELTH 64
/*
 * Enough for the string representation of any kind of UUID plus a trailing
 * NUL. EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)

struct partition_meta_info {
        char uuid[PARTITION_META_INFO_UUIDLTH];
        u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media. When set, the device remains present even when media is not
 * inserted. Shall not be set for devices which are removed entirely when the
 * media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 */
enum {
        GENHD_FL_REMOVABLE = 1 << 0,
        GENHD_FL_HIDDEN = 1 << 1,
        GENHD_FL_NO_PART = 1 << 2,
};

enum {
        DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
        DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
};

enum {
        /* Poll even if events_poll_msecs is unset */
        DISK_EVENT_FLAG_POLL = 1 << 0,
        /* Forward events to udev */
        DISK_EVENT_FLAG_UEVENT = 1 << 1,
        /* Block event polling when open for exclusive write */
        DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 1 << 2,
};

struct disk_events;
struct badblocks;

enum blk_integrity_checksum {
        BLK_INTEGRITY_CSUM_NONE = 0,
        BLK_INTEGRITY_CSUM_IP = 1,
        BLK_INTEGRITY_CSUM_CRC = 2,
        BLK_INTEGRITY_CSUM_CRC64 = 3,
} __packed;

struct blk_integrity {
        unsigned char flags;
        enum blk_integrity_checksum csum_type;
        unsigned char metadata_size;
        unsigned char pi_offset;
        unsigned char interval_exp;
        unsigned char tag_size;
        unsigned char pi_tuple_size;
};

typedef unsigned int __bitwise blk_mode_t;

/* open for reading */
#define BLK_OPEN_READ ((__force blk_mode_t)(1 << 0))
/* open for writing */
#define BLK_OPEN_WRITE ((__force blk_mode_t)(1 << 1))
/* open exclusively (vs other exclusive openers) */
#define BLK_OPEN_EXCL ((__force blk_mode_t)(1 << 2))
/* opened with O_NDELAY */
#define BLK_OPEN_NDELAY ((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL ((__force blk_mode_t)(1 << 4))
/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
#define BLK_OPEN_RESTRICT_WRITES ((__force blk_mode_t)(1 << 5))
/* return partition scanning errors */
#define BLK_OPEN_STRICT_SCAN ((__force blk_mode_t)(1 << 6))

struct gendisk {
        /*
         * major/first_minor/minors should not be set by any new driver, the
         * block core will take care of allocating them automatically.
         */
        int major;
        int first_minor;
        int minors;

        char disk_name[DISK_NAME_LEN]; /* name of major driver */

        unsigned short events;          /* supported events */
        unsigned short event_flags;     /* flags related to event processing */

        struct xarray part_tbl;
        struct block_device *part0;

        const struct block_device_operations *fops;
        struct request_queue *queue;
        void *private_data;

        struct bio_set bio_split;

        int flags;
        unsigned long state;
#define GD_NEED_PART_SCAN 0
#define GD_READ_ONLY 1
#define GD_DEAD 2
#define GD_NATIVE_CAPACITY 3
#define GD_ADDED 4
#define GD_SUPPRESS_PART_SCAN 5
#define GD_OWNS_QUEUE 6
#define GD_ZONE_APPEND_USED 7

        struct mutex open_mutex;        /* open/close mutex */
        unsigned open_partitions;       /* number of open partitions */

        struct backing_dev_info *bdi;
        struct kobject queue_kobj;      /* the queue/ directory */
        struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
        struct list_head slave_bdevs;
#endif
        struct timer_rand_state *random;
        struct disk_events *ev;

#ifdef CONFIG_BLK_DEV_ZONED
        /*
         * Zoned block device information. Reads of this information must be
         * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
         * information is only allowed while no requests are being processed.
         * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
         */
        unsigned int nr_zones;
        unsigned int zone_capacity;
        unsigned int last_zone_capacity;
        u8 __rcu *zones_cond;
        unsigned int zone_wplugs_hash_bits;
        atomic_t nr_zone_wplugs;
        spinlock_t zone_wplugs_lock;
        struct mempool *zone_wplugs_pool;
        struct hlist_head *zone_wplugs_hash;
        struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
        struct cdrom_device_info *cdi;
#endif
        int node_id;
        struct badblocks *bb;
        struct lockdep_map lockdep_map;
        u64 diskseq;
        blk_mode_t open_mode;

        /*
         * Independent sector access ranges. This is always NULL for
         * devices that do not have multiple independent access ranges.
         */
        struct blk_independent_access_ranges *ia_ranges;

        struct mutex rqos_state_mutex;  /* rqos state change mutex */
};

/**
 * disk_openers - returns how many openers there are for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk. Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
        return atomic_read(&disk->part0->bd_openers);
}
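
/*
 * Illustrative sketch (not part of the upstream header): since the count is
 * only stable under disk->open_mutex, a caller that acts on it would hold the
 * mutex around the check, e.g.:
 *
 *      mutex_lock(&disk->open_mutex);
 *      if (disk_openers(disk) > 1)
 *              pr_info("%s: still opened elsewhere\n", disk->disk_name);
 *      mutex_unlock(&disk->open_mutex);
 */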

/**
 * disk_has_partscan - return %true if partition scanning is enabled on a disk
 * @disk: disk to check
 *
 * Returns %true if partition scanning is enabled for @disk, or %false if
 * partition scanning is disabled either permanently or temporarily.
 */
static inline bool disk_has_partscan(struct gendisk *disk)
{
        return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
                !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
}

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
        (dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
        (&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk) ((disk)->cdi)
#else
#define disk_to_cdi(disk) NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
        return MKDEV(disk->major, disk->first_minor);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We should strive for 1 << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
 * however, we constrain this to what we can validate and test.
 */
#define BLK_MAX_BLOCK_SIZE SZ_64K
#else
#define BLK_MAX_BLOCK_SIZE PAGE_SIZE
#endif

/* blk_validate_limits() validates bsize, so drivers don't usually need to */
static inline int blk_validate_block_size(unsigned long bsize)
{
        if (bsize < 512 || bsize > BLK_MAX_BLOCK_SIZE || !is_power_of_2(bsize))
                return -EINVAL;

        return 0;
}
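
/*
 * Worked example: blk_validate_block_size(4096) returns 0, while 520 (not a
 * power of two) and 256 (smaller than a 512-byte sector) both return -EINVAL.
 * With CONFIG_TRANSPARENT_HUGEPAGE, sizes up to SZ_64K pass the check.
 */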

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
        op &= REQ_OP_MASK;
        return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/* flags set by the driver in queue_limits.features */
typedef unsigned int __bitwise blk_features_t;

/* supports a volatile write cache */
#define BLK_FEAT_WRITE_CACHE ((__force blk_features_t)(1u << 0))

/* supports passing on the FUA bit */
#define BLK_FEAT_FUA ((__force blk_features_t)(1u << 1))

/* rotational device (hard drive or floppy) */
#define BLK_FEAT_ROTATIONAL ((__force blk_features_t)(1u << 2))

/* contributes to the random number pool */
#define BLK_FEAT_ADD_RANDOM ((__force blk_features_t)(1u << 3))

/* do disk/partitions IO accounting */
#define BLK_FEAT_IO_STAT ((__force blk_features_t)(1u << 4))

/* don't modify data until writeback is done */
#define BLK_FEAT_STABLE_WRITES ((__force blk_features_t)(1u << 5))

/* always completes in submit context */
#define BLK_FEAT_SYNCHRONOUS ((__force blk_features_t)(1u << 6))

/* supports REQ_NOWAIT */
#define BLK_FEAT_NOWAIT ((__force blk_features_t)(1u << 7))

/* supports DAX */
#define BLK_FEAT_DAX ((__force blk_features_t)(1u << 8))

/* supports I/O polling */
#define BLK_FEAT_POLL ((__force blk_features_t)(1u << 9))

/* is a zoned device */
#define BLK_FEAT_ZONED ((__force blk_features_t)(1u << 10))

/* supports PCI(e) p2p requests */
#define BLK_FEAT_PCI_P2PDMA ((__force blk_features_t)(1u << 12))

/* skip this queue in blk_mq_(un)quiesce_tagset */
#define BLK_FEAT_SKIP_TAGSET_QUIESCE ((__force blk_features_t)(1u << 13))

/* atomic writes enabled */
#define BLK_FEAT_ATOMIC_WRITES ((__force blk_features_t)(1u << 14))

/* undocumented magic for bcache */
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
        ((__force blk_features_t)(1u << 15))

/*
 * Flags automatically inherited when stacking limits.
 */
#define BLK_FEAT_INHERIT_MASK \
        (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
         BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | \
         BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)

/* internal flags in queue_limits.flags */
typedef unsigned int __bitwise blk_flags_t;

/* do not send FLUSH/FUA commands despite advertising a write cache */
#define BLK_FLAG_WRITE_CACHE_DISABLED ((__force blk_flags_t)(1u << 0))

/* I/O topology is misaligned */
#define BLK_FLAG_MISALIGNED ((__force blk_flags_t)(1u << 1))

/* passthrough command IO accounting */
#define BLK_FLAG_IOSTATS_PASSTHROUGH ((__force blk_flags_t)(1u << 2))

struct queue_limits {
        blk_features_t features;
        blk_flags_t flags;
        unsigned long seg_boundary_mask;
        unsigned long virt_boundary_mask;

        unsigned int max_hw_sectors;
        unsigned int max_dev_sectors;
        unsigned int chunk_sectors;
        unsigned int max_sectors;
        unsigned int max_user_sectors;
        unsigned int max_segment_size;
        unsigned int max_fast_segment_size;
        unsigned int physical_block_size;
        unsigned int logical_block_size;
        unsigned int alignment_offset;
        unsigned int io_min;
        unsigned int io_opt;
        unsigned int max_discard_sectors;
        unsigned int max_hw_discard_sectors;
        unsigned int max_user_discard_sectors;
        unsigned int max_secure_erase_sectors;
        unsigned int max_write_zeroes_sectors;
        unsigned int max_wzeroes_unmap_sectors;
        unsigned int max_hw_wzeroes_unmap_sectors;
        unsigned int max_user_wzeroes_unmap_sectors;
        unsigned int max_hw_zone_append_sectors;
        unsigned int max_zone_append_sectors;
        unsigned int discard_granularity;
        unsigned int discard_alignment;
        unsigned int zone_write_granularity;

        /* atomic write limits */
        unsigned int atomic_write_hw_max;
        unsigned int atomic_write_max_sectors;
        unsigned int atomic_write_hw_boundary;
        unsigned int atomic_write_boundary_sectors;
        unsigned int atomic_write_hw_unit_min;
        unsigned int atomic_write_unit_min;
        unsigned int atomic_write_hw_unit_max;
        unsigned int atomic_write_unit_max;

        unsigned short max_segments;
        unsigned short max_integrity_segments;
        unsigned short max_discard_segments;

        unsigned short max_write_streams;
        unsigned int write_stream_granularity;

        unsigned int max_open_zones;
        unsigned int max_active_zones;

        /*
         * Drivers that set dma_alignment to less than 511 must be prepared to
         * handle individual bvecs that are not a multiple of SECTOR_SIZE in
         * size, due to possible offsets.
         */
        unsigned int dma_alignment;
        unsigned int dma_pad_mask;

        struct blk_integrity integrity;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
                               void *data);

int disk_report_zone(struct gendisk *disk, struct blk_zone *zone,
                     unsigned int idx, struct blk_report_zones_args *args);

int blkdev_get_zone_info(struct block_device *bdev, sector_t sector,
                         struct blk_zone *zone);

#define BLK_ALL_ZONES ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
                        unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_report_zones_cached(struct block_device *bdev, sector_t sector,
                               unsigned int nr_zones, report_zones_cb cb,
                               void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
                     sector_t sectors, sector_t nr_sectors);
int blk_revalidate_disk_zones(struct gendisk *disk);
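
/*
 * Illustrative sketch (hypothetical callback, not upstream code): a
 * report_zones_cb is invoked once per zone; e.g. counting open zones:
 *
 *      static int count_open_zones_cb(struct blk_zone *zone, unsigned int idx,
 *                                     void *data)
 *      {
 *              unsigned int *nr_open = data;
 *
 *              if (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
 *                  zone->cond == BLK_ZONE_COND_EXP_OPEN)
 *                      (*nr_open)++;
 *              return 0;
 *      }
 *
 *      unsigned int nr_open = 0;
 *      int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *                                    count_open_zones_cb, &nr_open);
 *
 * ret is the number of zones reported, or a negative errno on failure.
 */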

/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not
 * overlap and must include all sectors within the disk capacity (no sector
 * holes allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
        struct kobject kobj;
        sector_t sector;
        sector_t nr_sectors;
};

struct blk_independent_access_ranges {
        struct kobject kobj;
        bool sysfs_registered;
        unsigned int nr_ia_ranges;
        struct blk_independent_access_range ia_range[];
};

struct request_queue {
        /*
         * The queue owner gets to use this for whatever they like.
         * ll_rw_blk doesn't touch it.
         */
        void *queuedata;

        struct elevator_queue *elevator;

        const struct blk_mq_ops *mq_ops;

        /* sw queues */
        struct blk_mq_ctx __percpu *queue_ctx;

        /*
         * various queue flags, see QUEUE_* below
         */
        unsigned long queue_flags;

        unsigned int __data_racy rq_timeout;

        unsigned int queue_depth;

        refcount_t refs;

        /* hw dispatch queues */
        unsigned int nr_hw_queues;
        struct blk_mq_hw_ctx * __rcu *queue_hw_ctx;

        struct percpu_ref q_usage_counter;
        struct lock_class_key io_lock_cls_key;
        struct lockdep_map io_lockdep_map;

        struct lock_class_key q_lock_cls_key;
        struct lockdep_map q_lockdep_map;

        struct request *last_merge;

        spinlock_t queue_lock;

        int quiesce_depth;

        struct gendisk *disk;

        /*
         * mq queue kobject
         */
        struct kobject *mq_kobj;

        struct queue_limits limits;

#ifdef CONFIG_PM
        struct device *dev;
        enum rpm_status rpm_status;
#endif

        /*
         * Number of contexts that have called blk_set_pm_only(). If this
         * counter is above zero then only RQF_PM requests are processed.
         */
        atomic_t pm_only;

        struct blk_queue_stats *stats;
        struct rq_qos *rq_qos;
        struct mutex rq_qos_mutex;

        /*
         * ida allocated id for this queue. Used to index queues from
         * ioctx.
         */
        int id;

        /*
         * queue settings
         */
        unsigned int nr_requests;       /* Max # of requests */
        unsigned int async_depth;       /* Max # of async requests */

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        struct blk_crypto_profile *crypto_profile;
        struct kobject *crypto_kobject;
#endif

        struct timer_list timeout;
        struct work_struct timeout_work;

        atomic_t nr_active_requests_shared_tags;

        struct blk_mq_tags *sched_shared_tags;

        struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
        DECLARE_BITMAP(blkcg_pols, BLKCG_MAX_POLS);
        struct blkcg_gq *root_blkg;
        struct list_head blkg_list;
        struct mutex blkcg_mutex;
#endif

        int node;

        spinlock_t requeue_lock;
        struct list_head requeue_list;
        struct delayed_work requeue_work;

#ifdef CONFIG_BLK_DEV_IO_TRACE
        struct blk_trace __rcu *blk_trace;
#endif
        /*
         * for flush operations
         */
        struct blk_flush_queue *fq;
        struct list_head flush_list;

        /*
         * Protects against I/O scheduler switching, particularly when
         * updating q->elevator. Since the elevator update code path may
         * also modify q->nr_requests and wbt latency, this lock also
         * protects the sysfs attrs nr_requests and wbt_lat_usec.
         * Additionally the nr_hw_queues update may modify hctx tags,
         * reserved-tags and cpumask, so this lock also helps protect the
         * hctx sysfs/debugfs attrs. To ensure proper locking order during
         * an elevator or nr_hw_queues update, first freeze the queue, then
         * acquire ->elevator_lock.
         */
        struct mutex elevator_lock;

        struct mutex sysfs_lock;
        /*
         * Protects queue limits and also the sysfs attribute read_ahead_kb.
         */
        struct mutex limits_lock;

        /*
         * for reusing dead hctx instance in case of updating
         * nr_hw_queues
         */
        struct list_head unused_hctx_list;
        spinlock_t unused_hctx_lock;

        int mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
        /* Throttle data */
        struct throtl_data *td;
#endif
        struct rcu_head rcu_head;
#ifdef CONFIG_LOCKDEP
        struct task_struct *mq_freeze_owner;
        int mq_freeze_owner_depth;
        /*
         * Records disk & queue state in current context, used in unfreeze
         * queue
         */
        bool mq_freeze_disk_dead;
        bool mq_freeze_queue_dying;
#endif
        wait_queue_head_t mq_freeze_wq;
        /*
         * Protect concurrent access to q_usage_counter by
         * percpu_ref_kill() and percpu_ref_reinit().
         */
        struct mutex mq_freeze_lock;

        struct blk_mq_tag_set *tag_set;
        struct list_head tag_set_list;

        struct dentry *debugfs_dir;
        struct dentry *sched_debugfs_dir;
        struct dentry *rqos_debugfs_dir;
        /*
         * Serializes all debugfs metadata operations using the above dentries.
         */
        struct mutex debugfs_mutex;
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
enum {
        QUEUE_FLAG_DYING,               /* queue being torn down */
        QUEUE_FLAG_NOMERGES,            /* disable merge attempts */
        QUEUE_FLAG_SAME_COMP,           /* complete on same CPU-group */
        QUEUE_FLAG_FAIL_IO,             /* fake timeout */
        QUEUE_FLAG_NOXMERGES,           /* No extended merges */
        QUEUE_FLAG_SAME_FORCE,          /* force complete on same CPU */
        QUEUE_FLAG_INIT_DONE,           /* queue is initialized */
        QUEUE_FLAG_STATS,               /* track IO start and completion times */
        QUEUE_FLAG_REGISTERED,          /* queue has been registered to a disk */
        QUEUE_FLAG_QUIESCED,            /* queue has been quiesced */
        QUEUE_FLAG_RQ_ALLOC_TIME,       /* record rq->alloc_time_ns */
        QUEUE_FLAG_HCTX_ACTIVE,         /* at least one blk-mq hctx is active */
        QUEUE_FLAG_SQ_SCHED,            /* single queue style io dispatch */
        QUEUE_FLAG_DISABLE_WBT_DEF,     /* for sched to disable/enable wbt */
        QUEUE_FLAG_NO_ELV_SWITCH,       /* can't switch elevator any more */
        QUEUE_FLAG_QOS_ENABLED,         /* qos is enabled */
        QUEUE_FLAG_BIO_ISSUE_TIME,      /* record bio->issue_time_ns */
        QUEUE_FLAG_MAX
};

#define QUEUE_FLAG_MQ_DEFAULT (1UL << QUEUE_FLAG_SAME_COMP)

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);

#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
        test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_rot(q) ((q)->limits.features & BLK_FEAT_ROTATIONAL)
#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_passthrough_stat(q) \
        ((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
#define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX)
#define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q) \
        test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q) false
#endif

#define blk_noretry_request(rq) \
        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
                            REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
        ((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
#define blk_queue_disable_wbt(q) \
        test_bit(QUEUE_FLAG_DISABLE_WBT_DEF, &(q)->queue_flags)
#define blk_queue_no_elv_switch(q) \
        test_bit(QUEUE_FLAG_NO_ELV_SWITCH, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
        dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
        (dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
        return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
        return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
        return RPM_ACTIVE;
}
#endif

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
        return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
                (q->limits.features & BLK_FEAT_ZONED);
}

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
        if (!blk_queue_is_zoned(disk->queue))
                return 0;
        return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
        return bdev->bd_disk->queue->limits.max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
        return bdev->bd_disk->queue->limits.max_active_zones;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
        if (q->queue_depth)
                return q->queue_depth;

        return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
#define BLK_MIN_SG_TIMEOUT (7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio) \
        for (; _bio; _bio = _bio->bi_next)

int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
                                 const struct attribute_group **groups,
                                 struct fwnode_handle *fwnode);
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
                                 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
        return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline u8 bdev_partno(const struct block_device *bdev)
{
        return atomic_read(&bdev->__bd_flags) & BD_PARTNO;
}

static inline bool bdev_test_flag(const struct block_device *bdev,
                                  unsigned flag)
{
        return atomic_read(&bdev->__bd_flags) & flag;
}

static inline void bdev_set_flag(struct block_device *bdev, unsigned flag)
{
        atomic_or(flag, &bdev->__bd_flags);
}

static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
{
        atomic_andnot(flag, &bdev->__bd_flags);
}

static inline bool get_disk_ro(struct gendisk *disk)
{
        return bdev_test_flag(disk->part0, BD_READ_ONLY) ||
                test_bit(GD_READ_ONLY, &disk->state);
}

static inline bool bdev_read_only(struct block_device *bdev)
{
        return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
void disk_force_media_change(struct gendisk *disk);
void bdev_mark_dead(struct block_device *bdev, bool surprise);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
        return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
        return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
        return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
        return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
        return bdev_nr_sectors(sb->s_bdev) >>
                (sb->s_blocksize_bits - SECTOR_SHIFT);
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
        return disk->nr_zones;
}

/**
 * bio_needs_zone_write_plugging - Check if a BIO needs to be handled with
 *                                 zone write plugging
 * @bio: The BIO being submitted
 *
 * Return true whenever @bio execution needs to be handled through zone
 * write plugging (using blk_zone_plug_bio()). Return false otherwise.
 */
static inline bool bio_needs_zone_write_plugging(struct bio *bio)
{
        enum req_op op = bio_op(bio);

        /*
         * Only zoned block devices have a zone write plug hash table. But not
         * all of them have one (e.g. DM devices may not need one).
         */
        if (!bio->bi_bdev->bd_disk->zone_wplugs_hash)
                return false;

        /* Only write operations need zone write plugging. */
        if (!op_is_write(op))
                return false;

        /* Ignore empty flush */
        if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
                return false;

        /* Ignore BIOs that already have been handled by zone write plugging. */
        if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
                return false;

        /*
         * All zone write operations must be handled through zone write
         * plugging using blk_zone_plug_bio().
         */
        switch (op) {
        case REQ_OP_ZONE_APPEND:
        case REQ_OP_WRITE:
        case REQ_OP_WRITE_ZEROES:
        case REQ_OP_ZONE_FINISH:
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_RESET_ALL:
                return true;
        default:
                return false;
        }
}

bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
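
/*
 * Illustrative submit-path sketch (hypothetical driver code, not upstream):
 * a zoned driver's submission path would typically pair the two helpers, on
 * the assumption that a true return from blk_zone_plug_bio() means the BIO
 * is now owned by a zone write plug and must not be dispatched here:
 *
 *      if (bio_needs_zone_write_plugging(bio) &&
 *          blk_zone_plug_bio(bio, nr_segs))
 *              return;         // plugged; dispatched later by the plug
 *      ... dispatch bio ...
 */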

/**
 * disk_zone_capacity - returns the zone capacity of zone containing @sector
 * @disk: disk to work with
 * @sector: sector number within the queried zone
 *
 * Returns the zone capacity of a zone containing @sector. @sector can be any
 * sector in the zone.
 */
static inline unsigned int disk_zone_capacity(struct gendisk *disk,
                                              sector_t sector)
{
        sector_t zone_sectors = disk->queue->limits.chunk_sectors;

        if (sector + zone_sectors >= get_capacity(disk))
                return disk->last_zone_capacity;
        return disk->zone_capacity;
}

static inline unsigned int bdev_zone_capacity(struct block_device *bdev,
                                              sector_t pos)
{
        return disk_zone_capacity(bdev->bd_disk, pos);
}

bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector);

#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
        return 0;
}

static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)
{
        return false;
}

static inline bool bio_needs_zone_write_plugging(struct bio *bio)
{
        return false;
}

static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
{
        return false;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
        return disk_nr_zones(bdev->bd_disk);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
                                 struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @lim: queue limits to be used for this disk.
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Returns an ERR_PTR on error, else the allocated disk.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(lim, node_id)                                    \
({                                                                      \
        static struct lock_class_key __key;                            \
                                                                        \
        __blk_alloc_disk(lim, node_id, &__key);                        \
})
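
/*
 * Illustrative sketch (hypothetical driver, not upstream code): a minimal
 * BIO-based driver would allocate, populate, and register a disk roughly as
 * follows; "my_fops" and "nr_sectors" are assumptions:
 *
 *      struct queue_limits lim = {
 *              .logical_block_size = 4096,
 *              .physical_block_size = 4096,
 *      };
 *      struct gendisk *disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
 *      int err;
 *
 *      if (IS_ERR(disk))
 *              return PTR_ERR(disk);
 *      disk->fops = &my_fops;  // hypothetical block_device_operations
 *      snprintf(disk->disk_name, DISK_NAME_LEN, "myblk0");
 *      set_capacity(disk, nr_sectors);
 *      err = add_disk(disk);
 *      if (err)
 *              put_disk(disk);
 */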

int __register_blkdev(unsigned int major, const char *name,
                      void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
        __register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool disk_check_media_change(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
                                      struct gendisk *disk)
{
        return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
                                         struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);
struct bio *bio_submit_split_bioset(struct bio *bio,
                                    unsigned int split_sectors,
                                    struct bio_set *bs);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Convert a request operation REQ_OP_name into the string "name" */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
const char *blk_status_to_str(blk_status_t status);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT (1 << 0)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
                    unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
        return bdev->bd_queue;  /* this is never NULL */
}

/* Convert a zone condition BLK_ZONE_COND_name into the string "name" */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
        return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline bool bio_straddles_zones(struct bio *bio)
{
        return bio_sectors(bio) &&
                bio_zone_no(bio) !=
                disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}

/*
 * Return how much within the boundary is left to be used for I/O at a given
 * offset.
 */
static inline unsigned int blk_boundary_sectors_left(sector_t offset,
                unsigned int boundary_sectors)
{
        if (unlikely(!is_power_of_2(boundary_sectors)))
                return boundary_sectors - sector_div(offset, boundary_sectors);
        return boundary_sectors - (offset & (boundary_sectors - 1));
}
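
/*
 * Worked example: with a power-of-two boundary of 256 sectors, an I/O at
 * offset 300 has 256 - (300 & 255) = 212 sectors left before the next
 * boundary; the sector_div() path handles non-power-of-two boundaries.
 */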

/**
 * queue_limits_start_update - start an atomic update of queue limits
 * @q: queue to update
 *
 * This function starts an atomic update of the queue limits. It takes a lock
 * to prevent other updates and returns a snapshot of the current limits that
 * the caller can modify. The caller must call queue_limits_commit_update()
 * to finish the update.
 *
 * Context: process context.
 */
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
{
        mutex_lock(&q->limits_lock);
        return q->limits;
}
int queue_limits_commit_update_frozen(struct request_queue *q,
                struct queue_limits *lim);
int queue_limits_commit_update(struct request_queue *q,
                struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
int blk_validate_limits(struct queue_limits *lim);
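
/*
 * Illustrative sketch of the atomic update pattern (hypothetical values):
 *
 *      struct queue_limits lim = queue_limits_start_update(q);
 *      int err;
 *
 *      lim.max_discard_sectors = 0;    // e.g. discard turned out broken
 *      err = queue_limits_commit_update(q, &lim);
 *      if (err)
 *              return err;
 *
 * If an error is detected between start and commit, drop the lock with
 * queue_limits_cancel_update() instead (see below).
 */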

/**
 * queue_limits_cancel_update - cancel an atomic update of queue limits
 * @q: queue to update
 *
 * This function cancels an atomic update of the queue limits started by
 * queue_limits_start_update() and should be used when an error occurs after
 * starting the update.
 */
static inline void queue_limits_cancel_update(struct request_queue *q)
{
        mutex_unlock(&q->limits_lock);
}

/*
 * These helpers are for drivers that have sloppy feature negotiation and might
 * have to disable DISCARD, WRITE_ZEROES or SECURE_DISCARD from the I/O
 * completion handler when the device returned an indicator that the respective
 * feature is not actually supported. They are racy and the driver needs to
 * cope with that. Try to avoid this scheme if you can.
 */
static inline void blk_queue_disable_discard(struct request_queue *q)
{
        q->limits.max_discard_sectors = 0;
}

static inline void blk_queue_disable_secure_erase(struct request_queue *q)
{
        q->limits.max_secure_erase_sectors = 0;
}

static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{
        q->limits.max_write_zeroes_sectors = 0;
        q->limits.max_wzeroes_unmap_sectors = 0;
}

/*
 * Access functions for manipulating queue properties
 */
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                            sector_t offset);
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
                             sector_t offset, const char *pfx);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
                                struct blk_independent_access_ranges *iars);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

struct rq_list {
        struct request *head;
        struct request *tail;
};

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
        struct rq_list mq_list; /* blk-mq requests */

        /* if ios_left is > 1, we can batch tag/rq allocations */
        struct rq_list cached_rqs;
        u64 cur_ktime;
        unsigned short nr_ios;

        unsigned short rq_count;

        bool multiple_queues;
        bool has_elevator;

        struct list_head cb_list; /* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
        struct list_head list;
        blk_plug_cb_fn callback;
        void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
                                             void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
        if (plug)
                __blk_flush_plug(plug, async);
}
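
/*
 * Illustrative sketch (hypothetical loop, not upstream code): a submitter
 * batches related BIOs under one on-stack plug so they can be merged and
 * dispatched together:
 *
 *      struct blk_plug plug;
 *      int i;
 *
 *      blk_start_plug(&plug);
 *      for (i = 0; i < nr_bios; i++)
 *              submit_bio(bios[i]);
 *      blk_finish_plug(&plug); // flushes the plugged requests
 */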

/*
 * tsk == current here
 */
static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
        struct blk_plug *plug = tsk->plug;

        if (plug)
                plug->cur_ktime = 0;
        current->flags &= ~PF_BLOCK_TS;
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
                                         unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
        return 0;
}

static inline long nr_blockdev_pages(void)
{
        return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask);
void __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP     (1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK  (1 << 1)  /* don't write explicit zeroes */
#define BLKDEV_ZERO_KILLABLE    (1 << 2)  /* interruptible by fatal signals */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
                unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
        return blkdev_issue_discard(sb->s_bdev,
                                    block << (sb->s_blocksize_bits -
                                              SECTOR_SHIFT),
                                    nr_blocks << (sb->s_blocksize_bits -
                                                  SECTOR_SHIFT),
                                    gfp_mask);
}

static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask)
{
        return blkdev_issue_zeroout(sb->s_bdev,
                                    block << (sb->s_blocksize_bits -
                                              SECTOR_SHIFT),
                                    nr_blocks << (sb->s_blocksize_bits -
                                                  SECTOR_SHIFT),
                                    gfp_mask, 0);
}

static inline bool bdev_is_partition(struct block_device *bdev)
{
        return bdev_partno(bdev) != 0;
}

enum blk_default_limits {
        BLK_MAX_SEGMENTS        = 128,
        BLK_SAFE_MAX_SECTORS    = 255,
        BLK_MAX_SEGMENT_SIZE    = 65536,
        BLK_SEG_BOUNDARY_MASK   = 0xFFFFFFFFUL,
};

static inline struct queue_limits *bdev_limits(struct block_device *bdev)
{
        return &bdev_get_queue(bdev)->limits;
}

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
        return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
        return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
        return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
        return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
        return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
        return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
        return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
        return q->limits.max_segment_size;
}

static inline bool queue_emulates_zone_append(struct request_queue *q)
{
        return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors;
}

static inline bool bdev_emulates_zone_append(struct block_device *bdev)
{
        return queue_emulates_zone_append(bdev_get_queue(bdev));
}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
        return bdev_limits(bdev)->max_zone_append_sectors;
}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
        return queue_max_segments(bdev_get_queue(bdev));
}

static inline unsigned short bdev_max_write_streams(struct block_device *bdev)
{
        if (bdev_is_partition(bdev))
                return 0;
        return bdev_limits(bdev)->max_write_streams;
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
        return q->limits.logical_block_size;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
        return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
        return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
        return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
        return q->limits.io_min;
}

static inline unsigned int bdev_io_min(struct block_device *bdev)
{
        return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
        return q->limits.io_opt;
}

static inline unsigned int bdev_io_opt(struct block_device *bdev)
{
        return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
        return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
        return queue_zone_write_granularity(bdev_get_queue(bdev));
}

int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
        return bdev_limits(bdev)->max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
        return bdev_limits(bdev)->discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
        return bdev_limits(bdev)->max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
        return bdev_limits(bdev)->max_write_zeroes_sectors;
}

static inline unsigned int
bdev_write_zeroes_unmap_sectors(struct block_device *bdev)
{
        return bdev_limits(bdev)->max_wzeroes_unmap_sectors;
}

static inline bool bdev_rot(struct block_device *bdev)
{
        return blk_queue_rot(bdev_get_queue(bdev));
}

static inline bool bdev_nonrot(struct block_device *bdev)
{
        return !bdev_rot(bdev);
}

static inline bool bdev_synchronous(struct block_device *bdev)
{
        return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
}

static inline bool bdev_stable_writes(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
            q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
                return true;
        return q->limits.features & BLK_FEAT_STABLE_WRITES;
}

static inline bool blk_queue_write_cache(struct request_queue *q)
{
        return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
                !(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
}

static inline bool bdev_write_cache(struct block_device *bdev)
{
        return blk_queue_write_cache(bdev_get_queue(bdev));
}

static inline bool bdev_fua(struct block_device *bdev)
{
        return bdev_limits(bdev)->features & BLK_FEAT_FUA;
}

static inline bool bdev_nowait(struct block_device *bdev)
{
        return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
        return blk_queue_is_zoned(bdev_get_queue(bdev));
}

static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
        return disk_zone_no(bdev->bd_disk, sec);
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (!blk_queue_is_zoned(q))
                return 0;
        return q->limits.chunk_sectors;
}

static inline sector_t bdev_zone_start(struct block_device *bdev,
                                       sector_t sector)
{
        return sector & ~(bdev_zone_sectors(bdev) - 1);
}

static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
                                                   sector_t sector)
{
        return sector & (bdev_zone_sectors(bdev) - 1);
}

static inline sector_t bio_offset_from_zone_start(struct bio *bio)
{
        return bdev_offset_from_zone_start(bio->bi_bdev,
                                           bio->bi_iter.bi_sector);
}

static inline bool bdev_is_zone_start(struct block_device *bdev,
                                      sector_t sector)
{
        return bdev_offset_from_zone_start(bdev, sector) == 0;
}
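
/*
 * Worked example: on a device with 256 MiB zones (524288 sectors), sector
 * 1048600 lies in the zone starting at bdev_zone_start() = 1048576, at
 * offset bdev_offset_from_zone_start() = 24, so bdev_is_zone_start() is
 * false for it.
 */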
1553
1554 /* Check whether @sector is a multiple of the zone size. */
bdev_is_zone_aligned(struct block_device * bdev,sector_t sector)1555 static inline bool bdev_is_zone_aligned(struct block_device *bdev,
1556 sector_t sector)
1557 {
1558 return bdev_is_zone_start(bdev, sector);
1559 }
1560
1561 int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
1562 sector_t nr_sects, gfp_t gfp_mask);
1563
queue_dma_alignment(const struct request_queue * q)1564 static inline unsigned int queue_dma_alignment(const struct request_queue *q)
1565 {
1566 return q->limits.dma_alignment;
1567 }
1568
1569 static inline unsigned int
queue_atomic_write_unit_max_bytes(const struct request_queue * q)1570 queue_atomic_write_unit_max_bytes(const struct request_queue *q)
1571 {
1572 return q->limits.atomic_write_unit_max;
1573 }
1574
1575 static inline unsigned int
queue_atomic_write_unit_min_bytes(const struct request_queue * q)1576 queue_atomic_write_unit_min_bytes(const struct request_queue *q)
1577 {
1578 return q->limits.atomic_write_unit_min;
1579 }
1580
1581 static inline unsigned int
queue_atomic_write_boundary_bytes(const struct request_queue * q)1582 queue_atomic_write_boundary_bytes(const struct request_queue *q)
1583 {
1584 return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT;
1585 }
1586
1587 static inline unsigned int
queue_atomic_write_max_bytes(const struct request_queue * q)1588 queue_atomic_write_max_bytes(const struct request_queue *q)
1589 {
1590 return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
1591 }
1592
bdev_dma_alignment(struct block_device * bdev)1593 static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
1594 {
1595 return queue_dma_alignment(bdev_get_queue(bdev));
1596 }
1597
1598 static inline unsigned int
blk_lim_dma_alignment_and_pad(struct queue_limits * lim)1599 blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
1600 {
1601 return lim->dma_alignment | lim->dma_pad_mask;
1602 }
1603
blk_rq_aligned(struct request_queue * q,unsigned long addr,unsigned int len)1604 static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr,
1605 unsigned int len)
1606 {
1607 unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);
1608
1609 return !(addr & alignment) && !(len & alignment);
1610 }
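
/*
 * ->dma_alignment and ->dma_pad_mask are stored as masks, i.e.
 * (alignment - 1). A hypothetical worked example: with a combined mask
 * of 511 (512-byte alignment), addr == 0x1000 and len == 0x200 pass the
 * check above, while addr == 0x1004 does not.
 */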

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}
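
/*
 * For power-of-two sizes the result is simply log2 of the block size,
 * e.g. blksize_bits(512) == 9 and blksize_bits(4096) == 12.
 */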

int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")
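
/*
 * For example, a driver owning block major 42 (an illustrative number,
 * not a real assignment) could add MODULE_ALIAS_BLOCKDEV_MAJOR(42),
 * which expands to MODULE_ALIAS("block-major-42-*") and lets
 * udev/modprobe load the module when any block-major-42 device node is
 * first accessed.
 */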

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};

struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
			unsigned int flags);
	int (*open)(struct gendisk *disk, blk_mode_t mode);
	void (*release)(struct gendisk *disk);
	int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*getgeo)(struct gendisk *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/* this callback is called with swap_lock and sometimes the page
	 * table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones,
			struct blk_report_zones_args *args);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno: */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing the GPT entry at a given sector.
	 * Needed by Android devices, used by the GPT scanner and the MMC
	 * block driver.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};
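
/*
 * A simple bio-based driver usually fills in only a handful of these
 * hooks. A minimal sketch (my_fops, my_submit_bio and my_open are
 * hypothetical names, not part of this interface):
 *
 *	static const struct block_device_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.submit_bio	= my_submit_bio,
 *		.open		= my_open,
 *	};
 *
 * The table is hooked up by pointing the gendisk's ->fops at it before
 * the disk is added.
 */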

#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
				   unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif

static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned int sectors, unsigned long start_time);

unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio-based drivers
 * @bio:	bio to end accounting for
 * @start_time:	start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
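
/*
 * The usual accounting pattern in a bio-based driver is a sketch like:
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *
 *	... perform the I/O ...
 *
 *	bio_end_io_acct(bio, start);
 *
 * bio_end_io_acct_remapped() covers the case where bio->bi_bdev was
 * remapped to a different device in between, so the I/O can still be
 * charged to the original block device.
 */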

int bdev_validate_blocksize(struct block_device *bdev, int block_size);
int set_blocksize(struct file *file, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct blk_holder_ops {
	void (*mark_dead)(struct block_device *bdev, bool surprise);

	/*
	 * Sync the file system mounted on the block device.
	 */
	void (*sync)(struct block_device *bdev);

	/*
	 * Freeze the file system mounted on the block device.
	 */
	int (*freeze)(struct block_device *bdev);

	/*
	 * Thaw the file system mounted on the block device.
	 */
	int (*thaw)(struct block_device *bdev);
};
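
/*
 * A holder that only cares about device removal needs just ->mark_dead;
 * the sync/freeze/thaw hooks exist so that bdev_freeze() and bdev_thaw()
 * can reach whatever sits on top of the device. A minimal sketch
 * (my_holder_ops and my_mark_dead are hypothetical):
 *
 *	static const struct blk_holder_ops my_holder_ops = {
 *		.mark_dead = my_mark_dead,
 *	};
 */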

/*
 * For filesystems using @fs_holder_ops, the @holder argument passed to
 * the helpers used to open and claim block devices via
 * bd_prepare_to_claim() must point to a superblock.
 */
extern const struct blk_holder_ops fs_holder_ops;

/*
 * Return the correct open flags for blkdev_get_by_* for super block flags
 * as stored in sb->s_flags.
 */
#define sb_open_mode(flags) \
	(BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
	 (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
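
/*
 * Put together, a file system would typically open its backing device
 * roughly as follows (a sketch; error handling omitted):
 *
 *	struct file *bdev_file = bdev_file_open_by_path(path,
 *			sb_open_mode(sb->s_flags), sb, &fs_holder_ops);
 *
 * so that a read-only mount (SB_RDONLY) never requests BLK_OPEN_WRITE.
 */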

struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops);
struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
		void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);

struct block_device *I_BDEV(struct inode *inode);
struct block_device *file_bdev(struct file *bdev_file);
bool disk_live(struct gendisk *disk);
unsigned int block_size(struct block_device *bdev);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void bdev_statx(const struct path *path, struct kstat *stat,
		u32 request_mask)
{
}
static inline void printk_all_partitions(void)
{
}
static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
{
	return -EINVAL;
}
#endif /* CONFIG_BLOCK */

int bdev_freeze(struct block_device *bdev);
int bdev_thaw(struct block_device *bdev);
void bdev_fput(struct file *bdev_file);

struct io_comp_batch {
	struct rq_list req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
	void *poll_ctx;
};

static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
						struct queue_limits *limits)
{
	unsigned int alignment = max(limits->atomic_write_hw_unit_min,
				limits->atomic_write_hw_boundary);

	return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT);
}

static inline bool bdev_can_atomic_write(struct block_device *bdev)
{
	struct request_queue *bd_queue = bdev->bd_queue;
	struct queue_limits *limits = &bd_queue->limits;

	if (!limits->atomic_write_unit_min)
		return false;

	if (bdev_is_partition(bdev))
		return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect,
							limits);

	return true;
}
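
/*
 * The partition case above matters because atomic write units are
 * aligned in device LBA space: an unaligned partition start would shift
 * every otherwise-aligned write off its hardware alignment. With
 * hypothetical numbers, atomic_write_hw_unit_min == 4096 bytes
 * (8 sectors) and no boundary, a partition starting at sector 8 can
 * support atomic writes while one starting at sector 7 cannot.
 */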

static inline unsigned int
bdev_atomic_write_unit_min_bytes(struct block_device *bdev)
{
	if (!bdev_can_atomic_write(bdev))
		return 0;
	return queue_atomic_write_unit_min_bytes(bdev_get_queue(bdev));
}

static inline unsigned int
bdev_atomic_write_unit_max_bytes(struct block_device *bdev)
{
	if (!bdev_can_atomic_write(bdev))
		return 0;
	return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev));
}

static inline int bio_split_rw_at(struct bio *bio,
		const struct queue_limits *lim,
		unsigned *segs, unsigned max_bytes)
{
	return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
}

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }
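
/*
 * Completion batching is used on the polled completion path, roughly (a
 * simplified sketch of how callers drive it):
 *
 *	DEFINE_IO_COMP_BATCH(iob);
 *
 *	bio_poll(bio, &iob, 0);
 *	if (iob.complete)
 *		iob.complete(&iob);
 *
 * Pollers add completed requests to iob.req_list and set ->complete so
 * the caller can finish them in a single batch.
 */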

#endif /* _LINUX_BLKDEV_H */