/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
   md.h : kernel internal structure of the Linux MD driver
	  Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

*/

#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/badblocks.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/raid/md_u.h>
#include <trace/events/block.h>

#define MaxSector (~(sector_t)0)
/*
 * Number of guaranteed raid bios in case of extreme VM load:
 */
#define NR_RAID_BIOS 256

enum md_submodule_type {
	MD_PERSONALITY = 0,
	MD_CLUSTER,
	MD_BITMAP,
};

enum md_submodule_id {
	ID_LINEAR	= LEVEL_LINEAR,
	ID_RAID0	= 0,
	ID_RAID1	= 1,
	ID_RAID4	= 4,
	ID_RAID5	= 5,
	ID_RAID6	= 6,
	ID_RAID10	= 10,
	ID_CLUSTER,
	ID_BITMAP,
	ID_LLBITMAP,
	ID_BITMAP_NONE,
};

struct md_submodule_head {
	enum md_submodule_type	type;
	enum md_submodule_id	id;
	const char		*name;
	struct module		*owner;
};

/*
 * These flags should really be called "NO_RETRY" rather than
 * "FAILFAST" because they don't make any promise about time lapse,
 * only about the number of retries, which will be zero.
 * REQ_FAILFAST_DRIVER is not included because
 * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
 * seems to suggest that the errors it avoids retrying should usually
 * be retried.
 */
#define MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
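
/*
 * Illustrative sketch (an assumed caller pattern, not something defined in
 * this header): a personality's read path that honours the per-rdev FailFast
 * flag can simply OR these flags into the bio it is about to submit:
 *
 *	if (test_bit(FailFast, &rdev->flags))
 *		bio->bi_opf |= MD_FAILFAST;
 */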

/* Status of sync thread. */
enum sync_action {
	/*
	 * Represented by MD_RECOVERY_SYNC, started:
	 * 1) after assemble, to sync data from the first rdev to the other
	 *    copies; this must be done before other sync actions and will
	 *    only execute once;
	 * 2) when the array is resized (note that this is not reshape), to
	 *    sync data for the new range;
	 */
	ACTION_RESYNC,
	/*
	 * Represented by MD_RECOVERY_RECOVER, started:
	 * 1) for a new replacement, to sync data based on the replaced rdev
	 *    or available copies from other rdevs;
	 * 2) for a new member disk while the array is degraded, to sync data
	 *    from other rdevs;
	 * 3) on reassembly after power failure, or when re-adding a hot
	 *    removed rdev, to sync data from the first rdev to the other
	 *    copies based on the bitmap;
	 */
	ACTION_RECOVER,
	/*
	 * Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED |
	 * MD_RECOVERY_CHECK, started when the user echoes "check" to the
	 * sysfs api sync_action; used to check whether data copies from
	 * different rdevs are the same. The number of mismatched sectors is
	 * exported to the user by the sysfs api mismatch_cnt;
	 */
	ACTION_CHECK,
	/*
	 * Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED, started
	 * when the user echoes "repair" to the sysfs api sync_action;
	 * usually paired with ACTION_CHECK, and used to force syncing data
	 * once the user has found inconsistent data.
	 */
	ACTION_REPAIR,
	/*
	 * Represented by MD_RECOVERY_RESHAPE, started when a new member disk
	 * is added to the conf; note that this is different from spares or
	 * replacements;
	 */
	ACTION_RESHAPE,
	/*
	 * Represented by MD_RECOVERY_FROZEN; can be set by the sysfs api
	 * sync_action or by internal usage like setting the array read-only,
	 * and forbids the above actions.
	 */
	ACTION_FROZEN,
	/*
	 * None of the above actions match.
	 */
	ACTION_IDLE,
	NR_SYNC_ACTIONS,
};
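
/*
 * Hedged sketch of how the MD_RECOVERY_* flag combinations decode into one of
 * the actions above. The authoritative mapping is md_sync_action() in md.c
 * (declared later in this header); this is only an illustration of the rules
 * documented in the comments:
 *
 *	unsigned long recovery = mddev->recovery;
 *
 *	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
 *		return ACTION_FROZEN;
 *	if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
 *		return ACTION_RESHAPE;
 *	if (test_bit(MD_RECOVERY_RECOVER, &recovery))
 *		return ACTION_RECOVER;
 *	if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
 *		if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
 *			return ACTION_RESYNC;
 *		return test_bit(MD_RECOVERY_CHECK, &recovery) ?
 *			ACTION_CHECK : ACTION_REPAIR;
 *	}
 *	return ACTION_IDLE;
 */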

/*
 * The struct embedded in rdev is used to serialize IO.
 */
struct serial_in_rdev {
	struct rb_root_cached serial_rb;
	spinlock_t serial_lock;
	wait_queue_head_t serial_io_wait;
};

/*
 * MD's 'extended' device
 */
struct md_rdev {
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512byte sectors) */
	struct mddev *mddev;		/* RAID array if running */
	unsigned long last_events;	/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */
	struct file *bdev_file;		/* Handle from open for bdev */

	struct page *sb_page, *bb_page;
	int sb_loaded;
	__u64 sb_events;
	sector_t data_offset;		/* start of data in array */
	sector_t new_data_offset;	/* only relevant while reshaping */
	sector_t sb_start;		/* offset of the super block (in 512byte sectors) */
	int sb_size;			/* bytes in the superblock */
	int preferred_minor;		/* autorun support */

	struct kobject kobj;

	/* A device can be in one of three states based on two flags:
	 * Not working:   faulty==1 in_sync==0
	 * Fully working: faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array
	 *                faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long flags;		/* bit set of 'enum flag_bits' bits. */
	wait_queue_head_t blocked_wait;

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change
					 * completes.
					 */
	int saved_raid_disk;		/* role that device used to have in
					 * the array and could again if we did
					 * a partial resync from the bitmap
					 */
	union {
		sector_t recovery_offset;/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */
		sector_t journal_tail;	/* If this device is a journal device,
					 * this is the journal tail (journal
					 * recovery start point)
					 */
	};

	atomic_t nr_pending;		/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t read_errors;		/* number of consecutive read errors
					 * that we have tried to ignore.
					 */
	time64_t last_read_error;	/* monotonic time since our
					 * last read error
					 */
	atomic_t corrected_errors;	/* number of corrected read errors,
					 * for reporting to userspace and
					 * storing in superblock.
					 */

	struct serial_in_rdev *serial;	/* used for raid1 io serialization */

	struct kernfs_node *sysfs_state;	/* handle for 'state'
						 * sysfs entry */
	/* handle for 'unacknowledged_bad_blocks' sysfs dentry */
	struct kernfs_node *sysfs_unack_badblocks;
	/* handle for 'bad_blocks' sysfs dentry */
	struct kernfs_node *sysfs_badblocks;
	struct badblocks badblocks;

	struct {
		short offset;		/* Offset from superblock to start of
					 * PPL. Not used by external metadata. */
		unsigned int size;	/* Size in sectors of the PPL space */
		sector_t sector;	/* First sector of the PPL space */
	} ppl;
};

enum flag_bits {
	Faulty,			/* device is known to have a fault */
	In_sync,		/* device is in_sync with rest of array */
	Bitmap_sync,		/* ..actually, not quite In_sync. Need a
				 * bitmap-based recovery to get fully in sync.
				 * The bit is only meaningful before device
				 * has been passed to pers->hot_add_disk.
				 */
	WriteMostly,		/* Avoid reading if at all possible */
	AutoDetected,		/* added by auto-detect */
	Blocked,		/* An error occurred but has not yet
				 * been acknowledged by the metadata
				 * handler, so don't allow writes
				 * until it is cleared */
	WriteErrorSeen,		/* A write error has been seen on this
				 * device
				 */
	FaultRecorded,		/* Intermediate state for clearing
				 * Blocked. The Fault is/will-be
				 * recorded in the metadata, but that
				 * metadata hasn't been stored safely
				 * on disk yet.
				 */
	BlockedBadBlocks,	/* A writer is blocked because they
				 * found an unacknowledged bad-block.
				 * This can safely be cleared at any
				 * time, and the writer will re-check.
				 * It may be set at any time, and at
				 * worst the writer will timeout and
				 * re-check. So setting it as
				 * accurately as possible is good, but
				 * not absolutely critical.
				 */
	WantReplacement,	/* This device is a candidate to be
				 * hot-replaced, either because it has
				 * reported some faults, or because
				 * of explicit request.
				 */
	Replacement,		/* This device is a replacement for
				 * a want_replacement device with same
				 * raid_disk number.
				 */
	Candidate,		/* For clustered environments only:
				 * This device is seen locally but not
				 * by the whole cluster
				 */
	Journal,		/* This device is used as journal for
				 * raid-5/6.
				 * Usually, this device should be faster
				 * than other devices in the array
				 */
	ClusterRemove,
	ExternalBbl,		/* External metadata provides bad
				 * block management for a disk
				 */
	FailFast,		/* Minimal retries should be attempted on
				 * this device, so use REQ_FAILFAST_DEV.
				 * Also don't try to repair failed reads.
				 * It is expected that no bad block log
				 * is present.
				 */
	LastDev,		/* Seems to be the last working dev as
				 * it didn't fail, so don't use FailFast
				 * any more for metadata
				 */
	CollisionCheck,		/*
				 * check if there is collision between raid1
				 * serial bios.
				 */
	Nonrot,			/* non-rotational device (SSD) */
};

static inline int is_badblock(struct md_rdev *rdev, sector_t s, sector_t sectors,
			      sector_t *first_bad, sector_t *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
					 sectors,
					 first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}

static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s,
				    int sectors)
{
	sector_t first_bad;
	sector_t bad_sectors;

	return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors);
}
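
/*
 * Usage sketch (an illustration, not a required pattern): before issuing a
 * read to a member device, callers typically probe the bad block list and
 * clip or redirect the IO:
 *
 *	sector_t first_bad, bad_sectors;
 *
 *	if (is_badblock(rdev, this_sector, sectors,
 *			&first_bad, &bad_sectors)) {
 *		if (first_bad <= this_sector) {
 *			// whole range is bad: try another rdev
 *		} else {
 *			// clip the read to end at the first bad sector
 *			sectors = first_bad - this_sector;
 *		}
 *	}
 */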

extern bool rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			       int is_new);
extern void rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
				 int is_new);
struct md_cluster_info;
struct md_cluster_operations;

/**
 * enum mddev_flags - md device flags.
 * @MD_ARRAY_FIRST_USE: First use of array, needs initialization.
 * @MD_CLOSING: If set, we are closing the array, do not open it then.
 * @MD_JOURNAL_CLEAN: A raid with journal is already clean.
 * @MD_HAS_JOURNAL: The raid array has journal feature set.
 * @MD_CLUSTER_RESYNC_LOCKED: cluster raid only; the node has already taken
 *			      the resync lock and needs to release it.
 * @MD_FAILFAST_SUPPORTED: Using MD_FAILFAST on metadata writes is supported as
 *			   calls to md_error() will never cause the array to
 *			   become failed.
 * @MD_HAS_PPL: The raid array has PPL feature set.
 * @MD_HAS_MULTIPLE_PPLS: The raid array has multiple PPLs feature set.
 * @MD_NOT_READY: do_md_run() is active, so 'array_state' must not report that
 *		  the array is ready yet.
 * @MD_BROKEN: This is used to stop writes and mark array as failed.
 * @MD_DELETED: This device is being deleted.
 * @MD_HAS_SUPERBLOCK: There is a persistent superblock on member disks.
 * @MD_FAILLAST_DEV: Allow last rdev to be removed.
 * @MD_SERIALIZE_POLICY: Enforce that write IO is not reordered; only used by
 *			 raid1.
 *
 * change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added
 */
enum mddev_flags {
	MD_ARRAY_FIRST_USE,
	MD_CLOSING,
	MD_JOURNAL_CLEAN,
	MD_HAS_JOURNAL,
	MD_CLUSTER_RESYNC_LOCKED,
	MD_FAILFAST_SUPPORTED,
	MD_HAS_PPL,
	MD_HAS_MULTIPLE_PPLS,
	MD_NOT_READY,
	MD_BROKEN,
	MD_DO_DELETE,
	MD_DELETED,
	MD_HAS_SUPERBLOCK,
	MD_FAILLAST_DEV,
	MD_SERIALIZE_POLICY,
};

enum mddev_sb_flags {
	MD_SB_CHANGE_DEVS,	/* Some device status has changed */
	MD_SB_CHANGE_CLEAN,	/* transition to or from 'clean' */
	MD_SB_CHANGE_PENDING,	/* switch from 'clean' to 'active' in progress */
	MD_SB_NEED_REWRITE,	/* metadata write needs to be repeated */
};

#define NR_SERIAL_INFOS		8
/* record current range of serialized IOs */
struct serial_info {
	struct rb_node node;
	sector_t start;		/* start sector of rb node */
	sector_t last;		/* end sector of rb node */
	sector_t _subtree_last;	/* highest sector in subtree of rb node */
};

/*
 * mddev->curr_resync stores the current sector of the resync but
 * also has some overloaded values.
 */
enum {
	/* No resync in progress */
	MD_RESYNC_NONE = 0,
	/* Yielded to allow another conflicting resync to commence */
	MD_RESYNC_YIELDED = 1,
	/* Delayed to check that there is no conflict with another sync */
	MD_RESYNC_DELAYED = 2,
	/* Any value greater than or equal to this is in an active resync */
	MD_RESYNC_ACTIVE = 3,
};
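
/*
 * Illustrative sketch (an assumed consumer, not code from this header):
 * because of the overloaded low values, readers of curr_resync compare
 * against MD_RESYNC_ACTIVE rather than testing for non-zero:
 *
 *	if (mddev->curr_resync >= MD_RESYNC_ACTIVE) {
 *		// a resync is running and curr_resync is a real sector
 *		pr_debug("%s: resync at sector %llu\n", mdname(mddev),
 *			 (unsigned long long)mddev->curr_resync);
 *	}
 */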

struct mddev {
	void *private;
	struct md_personality *pers;
	dev_t unit;
	int md_minor;
	struct list_head disks;
	unsigned long flags;
	unsigned long sb_flags;

	int suspended;
	struct mutex suspend_mutex;
	struct percpu_ref active_io;
	int ro;
	int sysfs_active;		/* set when sysfs deletes
					 * are happening, so run/
					 * takeover/stop are not safe
					 */
	struct gendisk *gendisk;	/* mdraid gendisk */
	struct gendisk *dm_gendisk;	/* dm-raid gendisk */

	struct kobject kobj;
	int hold_active;
#define UNTIL_IOCTL	1
#define UNTIL_STOP	2

	/* Superblock information */
	int major_version,
	    minor_version,
	    patch_version;
	int persistent;
	int external;			/* metadata is managed externally */
	char metadata_type[17];		/* externally set */
	int chunk_sectors;
	time64_t ctime, utime;
	int level, layout;
	char clevel[16];
	int raid_disks;
	int max_disks;
	sector_t dev_sectors;		/* used size of component devices */
	sector_t array_sectors;		/* exported array size */
	int external_size;		/* size managed externally */
	unsigned int logical_block_size;
	__u64 events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int can_decrease_events;

	char uuid[16];

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t reshape_position;
	int delta_disks, new_level, new_layout;
	int new_chunk_sectors;
	int reshape_backwards;

	struct md_thread __rcu *thread;		/* management thread */
	struct md_thread __rcu *sync_thread;	/* doing resync or reconstruct */

	/*
	 * Set when a sync operation is started. It holds this value even
	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
	 * or finished). It is overwritten when a new sync operation is begun.
	 */
	enum sync_action last_sync_action;
	sector_t curr_resync;		/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed. So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t curr_resync_completed;
	unsigned long resync_mark;	/* a recent timestamp */
	sector_t resync_mark_cnt;	/* blocks written at resync_mark */
	sector_t curr_mark_cnt;		/* blocks scheduled now */

	sector_t resync_max_sectors;	/* may be set by personality */

	atomic64_t resync_mismatches;	/* count of sectors where
					 * parity/replica mismatch found
					 */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t suspend_lo;
	sector_t suspend_hi;
	/* if zero, use the system-wide default */
	int sync_speed_min;
	int sync_speed_max;
	int sync_io_depth;

	/* resync even though the same disks are shared among md-devices */
	int parallel_resync;

	int ok_start_degraded;

	unsigned long recovery;

	int in_sync;			/* known to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with disk->open_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> disk->open_mutex
	 *  disk->open_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex open_mutex;
	struct mutex reconfig_mutex;
	atomic_t active;		/* general refcount */
	atomic_t openers;		/* number of active opens */

	int changed;			/* True if we might need to
					 * reread partition info */
	int degraded;			/* whether md should consider
					 * adding a spare
					 */

	unsigned long normal_io_events;	/* IO event timestamp */
	atomic_t recovery_active;	/* blocks scheduled, but not written */
	wait_queue_head_t recovery_wait;
	sector_t resync_offset;
	sector_t resync_min;		/* user requested sync starts here */
	sector_t resync_max;		/* resync should pause
					 * when it gets here */

	struct kernfs_node *sysfs_state;	/* handle for 'array_state'
						 * file in sysfs.
						 */
	struct kernfs_node *sysfs_action;	/* handle for 'sync_action' */
	struct kernfs_node *sysfs_completed;	/* handle for 'sync_completed' */
	struct kernfs_node *sysfs_degraded;	/* handle for 'degraded' */
	struct kernfs_node *sysfs_level;	/* handle for 'level' */

	/* used for delayed sysfs removal */
	struct work_struct del_work;
	/* used for registering a new sync thread */
	struct work_struct sync_work;

	/* "lock" protects:
	 *   flush_bio transition from NULL to !NULL
	 *   rdev superblocks, events
	 *   clearing MD_CHANGE_*
	 *   in_sync - and related safemode and MD_CHANGE changes
	 *   pers (also protected by reconfig_mutex and pending IO).
	 *   clearing ->bitmap
	 *   clearing ->bitmap_info.file
	 *   changing ->resync_{min,max}
	 *   setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
	 */
	spinlock_t lock;
	wait_queue_head_t sb_wait;	/* for waiting on superblock updates */
	atomic_t pending_writes;	/* number of active superblock writes */

	unsigned int safemode;		/* if set, update "clean" superblock
					 * when no writes pending.
					 */
	unsigned int safemode_delay;
	struct timer_list safemode_timer;
	struct percpu_ref writes_pending;
	int sync_checkers;		/* # of threads checking writes_pending */

	enum md_submodule_id bitmap_id;
	void *bitmap;			/* the bitmap for the device */
	struct bitmap_operations *bitmap_ops;
	struct {
		struct file *file;	/* the bitmap file */
		loff_t offset;		/* offset from superblock of
					 * start of bitmap. May be
					 * negative, but not '0'
					 * For external metadata, offset
					 * from start of device.
					 */
		unsigned long space;	/* space available at this offset */
		loff_t default_offset;	/* this is the offset to use when
					 * hot-adding a bitmap. It should
					 * eventually be settable by sysfs.
					 */
		unsigned long default_space;	/* space available at
						 * default offset */
		struct mutex mutex;
		unsigned long chunksize;
		unsigned long daemon_sleep;	/* how many jiffies between updates? */
		unsigned long max_write_behind;	/* write-behind mode */
		int external;
		int nodes;		/* Maximum number of nodes in the cluster */
		char cluster_name[64];	/* Name of the cluster */
	} bitmap_info;

	atomic_t max_corr_read_errors;	/* max read retries */
	struct list_head all_mddevs;

	const struct attribute_group *to_remove;

	struct bio_set bio_set;
	struct bio_set sync_set;	/* for sync operations like
					 * metadata and bitmap writes
					 */
	struct bio_set io_clone_set;

	struct work_struct event_work;	/* used by dm to report failure event */
	mempool_t *serial_info_pool;
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	struct md_cluster_info *cluster_info;
	struct md_cluster_operations *cluster_ops;
	unsigned int good_device_nr;	/* good device num within cluster raid */
	unsigned int noio_flag;		/* for memalloc scope API */

	/*
	 * Temporarily store rdevs that will finally be removed when
	 * reconfig_mutex is unlocked; protected by reconfig_mutex.
	 */
	struct list_head deleting;

	/* The sequence number for sync thread */
	atomic_t sync_seq;
};

enum recovery_flags {
	/* flags for sync thread running status */

	/*
	 * set when a sync action is set and a new sync thread needs to be
	 * registered, or when just adding/removing spares from conf.
	 */
	MD_RECOVERY_NEEDED,
	/* sync thread is running, or about to be started */
	MD_RECOVERY_RUNNING,
	/* sync thread needs to be aborted for some reason */
	MD_RECOVERY_INTR,
	/* sync thread is done and is waiting to be unregistered */
	MD_RECOVERY_DONE,
	/* running sync thread must abort immediately, and not restart */
	MD_RECOVERY_FROZEN,
	/* waiting for pers->start() to finish */
	MD_RECOVERY_WAIT,

	/* flags that determine the sync action, see details in enum sync_action */

	/* if just this flag is set, action is resync. */
	MD_RECOVERY_SYNC,
	/*
	 * paired with MD_RECOVERY_SYNC; if MD_RECOVERY_CHECK is not set,
	 * action is repair, meaning the user requested a resync.
	 */
	MD_RECOVERY_REQUESTED,
	/*
	 * paired with MD_RECOVERY_SYNC and MD_RECOVERY_REQUESTED, action is
	 * check.
	 */
	MD_RECOVERY_CHECK,
	/* recovery, or need to try it */
	MD_RECOVERY_RECOVER,
	/* reshape */
	MD_RECOVERY_RESHAPE,
	/* remote node is running resync thread */
	MD_RESYNCING_REMOTE,
	/* raid456 lazy initial recover */
	MD_RECOVERY_LAZY_RECOVER,
};

enum md_ro_state {
	MD_RDWR,
	MD_RDONLY,
	MD_AUTO_READ,
	MD_MAX_STATE
};

static inline bool md_is_rdwr(struct mddev *mddev)
{
	return (mddev->ro == MD_RDWR);
}

static inline bool reshape_interrupted(struct mddev *mddev)
{
	/* reshape never started */
	if (mddev->reshape_position == MaxSector)
		return false;

	/* interrupted */
	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return true;

	/* running reshape will be interrupted soon. */
	if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		return true;

	return false;
}

static inline int __must_check mddev_lock(struct mddev *mddev)
{
	int ret;

	ret = mutex_lock_interruptible(&mddev->reconfig_mutex);

	/* MD_DELETED is set in do_md_stop with reconfig_mutex.
	 * So check it here.
	 */
	if (!ret && test_bit(MD_DELETED, &mddev->flags)) {
		ret = -ENODEV;
		mutex_unlock(&mddev->reconfig_mutex);
	}

	return ret;
}

/* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 * There is no need to check MD_DELETED here: the owner that
 * holds the lock can't be stopped, and no path may call
 * this function after do_md_stop.
 */
static inline void mddev_lock_nointr(struct mddev *mddev)
{
	mutex_lock(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev *mddev)
{
	int ret;

	ret = mutex_trylock(&mddev->reconfig_mutex);
	if (ret && test_bit(MD_DELETED, &mddev->flags)) {
		ret = 0;
		mutex_unlock(&mddev->reconfig_mutex);
	}
	return ret;
}
extern void mddev_unlock(struct mddev *mddev);
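
/*
 * Typical locking pattern (an illustrative sketch, with everything but the
 * locking elided). mddev_lock() can fail either because the wait was
 * interrupted or because the array is being deleted, so the return value
 * must be checked:
 *
 *	int err = mddev_lock(mddev);
 *
 *	if (err)
 *		return err;
 *	// ... reconfigure the array under reconfig_mutex ...
 *	mddev_unlock(mddev);
 */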

struct md_personality {
	struct md_submodule_head head;

	bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
	/*
	 * start-up work that does NOT require an md_thread; tasks that
	 * require an md_thread should go into start()
	 */
	int (*run)(struct mddev *mddev);
	/* start-up work that requires md threads */
	int (*start)(struct mddev *mddev);
	void (*free)(struct mddev *mddev, void *priv);
	void (*status)(struct seq_file *seq, struct mddev *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_add_disk)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
	int (*spare_active)(struct mddev *mddev);
	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr,
				 sector_t max_sector, int *skipped);
	int (*resize)(struct mddev *mddev, sector_t sectors);
	sector_t (*size)(struct mddev *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape)(struct mddev *mddev);
	int (*start_reshape)(struct mddev *mddev);
	void (*finish_reshape)(struct mddev *mddev);
	void (*update_reshape_pos)(struct mddev *mddev);
	void (*prepare_suspend)(struct mddev *mddev);
	/* quiesce suspends or resumes internal processing.
	 * 1 - stop new actions and wait for action io to complete
	 * 0 - return to normal behaviour
	 */
	void (*quiesce)(struct mddev *mddev, int quiesce);
	/* takeover is used to transition an array from one
	 * personality to another. The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover)(struct mddev *mddev);
	/* Changes the consistency policy of an active array. */
	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
	/* convert io ranges from array to bitmap */
	void (*bitmap_sector)(struct mddev *mddev, sector_t *offset,
			      unsigned long *sectors);
};

struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct mddev *, char *);
	ssize_t (*store)(struct mddev *, const char *, size_t);
};

static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, name);
	return sd;
}

static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}

static inline char *mdname(struct mddev *mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];

	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
	} else
		return 0;
}

static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];

	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&mddev->kobj, nm);
	}
}

/*
 * Iterates over an rdev list. It's safe to remove the
 * current 'rdev'; don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * Iterates over the 'same array disks' list
 */
#define rdev_for_each(rdev, mddev)					\
	list_for_each_entry(rdev, &((mddev)->disks), same_set)

#define rdev_for_each_safe(rdev, tmp, mddev)				\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)					\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
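
/*
 * Usage sketch (illustrative): walking the member disks. The plain iterators
 * are typically run with reconfig_mutex held; rdev_for_each_rcu is for
 * readers that only hold rcu_read_lock():
 *
 *	struct md_rdev *rdev;
 *
 *	rdev_for_each(rdev, mddev) {
 *		if (test_bit(Faulty, &rdev->flags))
 *			continue;
 *		// ... operate on a working member device ...
 *	}
 */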

struct md_thread {
	void (*run)(struct md_thread *thread);
	struct mddev *mddev;
	wait_queue_head_t wqueue;
	unsigned long flags;
	struct task_struct *tsk;
	unsigned long timeout;
	void *private;
};

struct md_io_clone {
	struct mddev *mddev;
	struct bio *orig_bio;
	unsigned long start_time;
	sector_t offset;
	unsigned long sectors;
	enum stat_group rw;
	struct bio bio_clone;
};

#define THREAD_WAKEUP	0

#define md_wakeup_thread(thread) do { \
	rcu_read_lock(); \
	__md_wakeup_thread(thread); \
	rcu_read_unlock(); \
} while (0)
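
/*
 * Illustrative sketch (a common caller pattern, not a rule imposed here):
 * asking the management thread to act usually means setting a recovery flag
 * first and then waking the thread:
 *
 *	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 *	md_wakeup_thread(mddev->thread);
 *
 * The wrapper takes rcu_read_lock() because ->thread is an __rcu pointer
 * that md_unregister_thread() may free concurrently.
 */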

static inline void safe_put_page(struct page *p)
{
	if (p)
		put_page(p);
}

int register_md_submodule(struct md_submodule_head *msh);
void unregister_md_submodule(struct md_submodule_head *msh);

extern struct md_thread *md_register_thread(
	void (*run)(struct md_thread *thread),
	struct mddev *mddev,
	const char *name);
extern void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp);
extern void __md_wakeup_thread(struct md_thread __rcu *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern enum sync_action md_sync_action(struct mddev *mddev);
extern enum sync_action md_sync_action_by_name(const char *page);
extern const char *md_sync_action_name(enum sync_action action);
extern void md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks);
extern void md_sync_error(struct mddev *mddev);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
			   struct bio *bio, sector_t start, sector_t size);
void md_account_bio(struct mddev *mddev, struct bio **bio);
void md_free_cloned_bio(struct bio *bio);

extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev,
		       sector_t sector, int size, struct page *page,
		       unsigned int offset);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, blk_opf_t opf, bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(void);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);

extern int mddev_init(struct mddev *mddev);
extern void mddev_destroy(struct mddev *mddev);
void md_init_stacking_limits(struct queue_limits *lim);
struct mddev *md_alloc(dev_t dev, char *name);
void mddev_put(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);

extern bool md_handle_request(struct mddev *mddev, struct bio *bio);
extern int mddev_suspend(struct mddev *mddev, bool interruptible);
extern void mddev_resume(struct mddev *mddev);
extern void md_idle_sync_thread(struct mddev *mddev);
extern void md_frozen_sync_thread(struct mddev *mddev);
extern void md_unfrozen_sync_thread(struct mddev *mddev);

extern void md_update_sb(struct mddev *mddev, int force);
extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev);
extern void mddev_destroy_serial_pool(struct mddev *mddev,
				      struct md_rdev *rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);

static inline bool is_rdev_broken(struct md_rdev *rdev)
{
	return !disk_live(rdev->bdev->bd_disk);
}

static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);

	if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
}
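
/*
 * Sketch of the pairing expected around nr_pending (illustrative; the
 * personalities open-code this): a reference is taken before IO is issued to
 * a member device and dropped on completion, so a faulty device can be
 * removed once its last request drains:
 *
 *	atomic_inc(&rdev->nr_pending);
 *	// ... submit IO to rdev->bdev ...
 *	// on completion:
 *	rdev_dec_pending(rdev, mddev);
 */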

static inline int mddev_is_clustered(struct mddev *mddev)
{
	return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}

/* clear unsupported mddev_flags */
static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
	unsigned long unsupported_flags)
{
	mddev->flags &= ~unsupported_flags;
}

static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
	    !bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors)
		mddev->gendisk->queue->limits.max_write_zeroes_sectors = 0;
}

static inline int mddev_suspend_and_lock(struct mddev *mddev)
{
	int ret;

	ret = mddev_suspend(mddev, true);
	if (ret)
		return ret;

	ret = mddev_lock(mddev);
	if (ret)
		mddev_resume(mddev);

	return ret;
}

static inline void mddev_suspend_and_lock_nointr(struct mddev *mddev)
{
	mddev_suspend(mddev, false);
	mutex_lock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock_and_resume(struct mddev *mddev)
{
	mddev_unlock(mddev);
	mddev_resume(mddev);
}
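
/*
 * Usage sketch (illustrative): the suspend/lock helpers above pair up, so a
 * caller that must reconfigure with IO quiesced typically looks like:
 *
 *	int err = mddev_suspend_and_lock(mddev);
 *
 *	if (err)
 *		return err;
 *	// ... change configuration while IO is suspended ...
 *	mddev_unlock_and_resume(mddev);
 */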

struct mdu_array_info_s;
struct mdu_disk_info_s;

extern int mdp_major;
void md_autostart_arrays(int part);
int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
int do_md_run(struct mddev *mddev);
#define MDDEV_STACK_INTEGRITY	(1u << 0)
int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
			    unsigned int flags);
int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev);
void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes);

extern const struct block_device_operations md_fops;

/*
 * MD devices can be used underneath DM, in which case ->gendisk is NULL.
 */
static inline bool mddev_is_dm(struct mddev *mddev)
{
	return !mddev->gendisk;
}

static inline bool raid_is_456(struct mddev *mddev)
{
	return mddev->level == ID_RAID4 || mddev->level == ID_RAID5 ||
	       mddev->level == ID_RAID6;
}

static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
				     sector_t sector)
{
	if (!mddev_is_dm(mddev))
		trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector);
}

static inline bool rdev_blocked(struct md_rdev *rdev)
{
	/*
	 * Blocked will be set by the error handler and cleared by the daemon
	 * after updating the superblock; meanwhile write IO should be blocked
	 * to prevent reading old data after a power failure.
	 */
	if (test_bit(Blocked, &rdev->flags))
		return true;

	/*
	 * A Faulty device should not be accessed anymore, so there is no need
	 * to wait for bad blocks to be acknowledged.
	 */
	if (test_bit(Faulty, &rdev->flags))
		return false;

	/* rdev is blocked by badblocks. */
	if (test_bit(BlockedBadBlocks, &rdev->flags))
		return true;

	return false;
}

#define mddev_add_trace_msg(mddev, fmt, args...)			\
do {									\
	if (!mddev_is_dm(mddev))					\
		blk_add_trace_msg((mddev)->gendisk->queue, fmt, ##args); \
} while (0)
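
/*
 * Usage sketch (illustrative): both trace helpers above are no-ops when the
 * array is driven by dm-raid, so callers may use them unconditionally:
 *
 *	mddev_add_trace_msg(mddev, "md: wait rdev %d blocked",
 *			    rdev->raid_disk);
 */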

#endif /* _MD_MD_H */