xref: /linux/drivers/md/md.c (revision 7fe6ac157b7e15c8976bd62ad7cb98e248884e83)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3    md.c : Multiple Devices driver for Linux
4      Copyright (C) 1998, 1999, 2000 Ingo Molnar
5 
6      completely rewritten, based on the MD driver code from Marc Zyngier
7 
8    Changes:
9 
10    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
11    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
12    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
13    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
14    - kmod support by: Cyrus Durgin
15    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
16    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17 
18    - lots of fixes and improvements to the RAID1/RAID5 and generic
19      RAID code (such as request based resynchronization):
20 
21      Neil Brown <neilb@cse.unsw.edu.au>.
22 
23    - persistent bitmap code
24      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
25 
26 
27    Errors, Warnings, etc.
28    Please use:
29      pr_crit() for error conditions that risk data loss
30      pr_err() for error conditions that are unexpected, like an IO error
31          or internal inconsistency
32      pr_warn() for error conditions that could have been predicted, like
33          adding a device to an array when it has incompatible metadata
34      pr_info() for interesting, very rare events, like an array starting
35          or stopping, or resync starting or stopping
36      pr_debug() for everything else.
37 
38 */
39 
40 #include <linux/sched/mm.h>
41 #include <linux/sched/signal.h>
42 #include <linux/kthread.h>
43 #include <linux/blkdev.h>
44 #include <linux/blk-integrity.h>
45 #include <linux/badblocks.h>
46 #include <linux/sysctl.h>
47 #include <linux/seq_file.h>
48 #include <linux/fs.h>
49 #include <linux/poll.h>
50 #include <linux/ctype.h>
51 #include <linux/string.h>
52 #include <linux/hdreg.h>
53 #include <linux/proc_fs.h>
54 #include <linux/random.h>
55 #include <linux/major.h>
56 #include <linux/module.h>
57 #include <linux/reboot.h>
58 #include <linux/file.h>
59 #include <linux/compat.h>
60 #include <linux/delay.h>
61 #include <linux/raid/md_p.h>
62 #include <linux/raid/md_u.h>
63 #include <linux/raid/detect.h>
64 #include <linux/slab.h>
65 #include <linux/percpu-refcount.h>
66 #include <linux/part_stat.h>
67 
68 #include "md.h"
69 #include "md-bitmap.h"
70 #include "md-cluster.h"
71 
72 static const char *action_name[NR_SYNC_ACTIONS] = {
73 	[ACTION_RESYNC]		= "resync",
74 	[ACTION_RECOVER]	= "recover",
75 	[ACTION_CHECK]		= "check",
76 	[ACTION_REPAIR]		= "repair",
77 	[ACTION_RESHAPE]	= "reshape",
78 	[ACTION_FROZEN]		= "frozen",
79 	[ACTION_IDLE]		= "idle",
80 };
81 
82 static DEFINE_XARRAY(md_submodule);
83 
84 static const struct kobj_type md_ktype;
85 
86 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
87 
88 /*
89  * This workqueue is used by sync_work to register a new sync_thread, by
90  * del_work to remove an rdev, and by event_work, which is only set by dm-raid.
91  *
92  * Note that sync_work will grab reconfig_mutex; hence never flush this
93  * workqueue with reconfig_mutex held.
94  */
95 static struct workqueue_struct *md_misc_wq;
96 
97 static int remove_and_add_spares(struct mddev *mddev,
98 				 struct md_rdev *this);
99 static void mddev_detach(struct mddev *mddev);
100 static void export_rdev(struct md_rdev *rdev);
101 static void md_wakeup_thread_directly(struct md_thread __rcu **thread);
102 
103 /*
104  * Default number of read corrections we'll attempt on an rdev
105  * before ejecting it from the array. We divide the read error
106  * count by 2 for every hour elapsed between read errors.
107  */
108 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
109 /* Default safemode delay: 200 msec */
110 #define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
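/*
 * Worked example (assuming HZ == 1000): (200 * 1000)/1000 + 1 == 201 jiffies,
 * i.e. just over 200 msec. The "+ 1" guards against the integer division
 * truncating the delay below the requested time on other HZ values.
 */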
111 /*
112  * Current RAID-1,4,5,6,10 parallel reconstruction 'guaranteed speed limit'
113  * is sysctl_speed_limit_min, 1000 KB/sec by default, so the extra system load
114  * does not show up that much. Increase it if you want to have more guaranteed
115  * speed. Note that the RAID driver will use the maximum bandwidth
116  * sysctl_speed_limit_max, 200 MB/sec by default, if the IO subsystem is idle.
117  *
118  * Background sync IO speed control:
119  *
120  * - below speed min:
121  *   no limit;
122  * - above speed min and below speed max:
123  *   a) if mddev is idle, then no limit;
124  *   b) if mddev is busy handling normal IO, then limit inflight sync IO
125  *   to sync_io_depth;
126  * - above speed max:
127  *   sync IO can't be issued;
128  *
129  * The following settings can be changed via /proc/sys/dev/raid/ for the whole
130  * system, or via /sys/block/mdX/md/ for a single array.
131  */
132 static int sysctl_speed_limit_min = 1000;
133 static int sysctl_speed_limit_max = 200000;
134 static int sysctl_sync_io_depth = 32;
135 
136 static int speed_min(struct mddev *mddev)
137 {
138 	return mddev->sync_speed_min ?
139 		mddev->sync_speed_min : sysctl_speed_limit_min;
140 }
141 
142 static int speed_max(struct mddev *mddev)
143 {
144 	return mddev->sync_speed_max ?
145 		mddev->sync_speed_max : sysctl_speed_limit_max;
146 }
147 
148 static int sync_io_depth(struct mddev *mddev)
149 {
150 	return mddev->sync_io_depth ?
151 		mddev->sync_io_depth : sysctl_sync_io_depth;
152 }
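/*
 * Illustrative sketch only (not part of md.c): how the three knobs above
 * combine into the throttling policy described in the comment block
 * preceding them. The helper name and parameters are invented for
 * illustration.
 */
static bool __maybe_unused example_sync_io_allowed(struct mddev *mddev,
						   int cur_speed_kbps,
						   int inflight_sync_io,
						   bool array_idle)
{
	if (cur_speed_kbps < speed_min(mddev))
		return true;			/* below min: no limit */
	if (cur_speed_kbps > speed_max(mddev))
		return false;			/* above max: no sync IO */
	/* in between: limit inflight sync IO only while normal IO is active */
	return array_idle || inflight_sync_io < sync_io_depth(mddev);
}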
153 
154 static void rdev_uninit_serial(struct md_rdev *rdev)
155 {
156 	if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
157 		return;
158 
159 	kvfree(rdev->serial);
160 	rdev->serial = NULL;
161 }
162 
163 static void rdevs_uninit_serial(struct mddev *mddev)
164 {
165 	struct md_rdev *rdev;
166 
167 	rdev_for_each(rdev, mddev)
168 		rdev_uninit_serial(rdev);
169 }
170 
171 static int rdev_init_serial(struct md_rdev *rdev)
172 {
173 	/* serial_nums equals BARRIER_BUCKETS_NR */
174 	int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
175 	struct serial_in_rdev *serial = NULL;
176 
177 	if (test_bit(CollisionCheck, &rdev->flags))
178 		return 0;
179 
180 	serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
181 			  GFP_KERNEL);
182 	if (!serial)
183 		return -ENOMEM;
184 
185 	for (i = 0; i < serial_nums; i++) {
186 		struct serial_in_rdev *serial_tmp = &serial[i];
187 
188 		spin_lock_init(&serial_tmp->serial_lock);
189 		serial_tmp->serial_rb = RB_ROOT_CACHED;
190 	}
191 
192 	rdev->serial = serial;
193 	set_bit(CollisionCheck, &rdev->flags);
194 
195 	return 0;
196 }
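/*
 * Worked example of the bucket count above: with 4K pages (PAGE_SHIFT == 12)
 * and a 4-byte atomic_t (ilog2(sizeof(atomic_t)) == 2), serial_nums is
 * 1 << (12 - 2) == 1024, matching BARRIER_BUCKETS_NR as noted above.
 */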
197 
198 static int rdevs_init_serial(struct mddev *mddev)
199 {
200 	struct md_rdev *rdev;
201 	int ret = 0;
202 
203 	rdev_for_each(rdev, mddev) {
204 		ret = rdev_init_serial(rdev);
205 		if (ret)
206 			break;
207 	}
208 
209 	/* Free all resources if the pool does not exist */
210 	if (ret && !mddev->serial_info_pool)
211 		rdevs_uninit_serial(mddev);
212 
213 	return ret;
214 }
215 
216 /*
217  * rdev needs serialization enabled if it meets both conditions:
218  * 1. it is a multi-queue device flagged with writemostly.
219  * 2. the write-behind mode is enabled.
220  */
221 static int rdev_need_serial(struct md_rdev *rdev)
222 {
223 	return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
224 		rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
225 		test_bit(WriteMostly, &rdev->flags));
226 }
227 
228 /*
229  * Init resources for rdev(s), then create serial_info_pool if:
230  * 1. rdev is the first device that returns true from rdev_need_serial().
231  * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
232  */
233 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev)
234 {
235 	int ret = 0;
236 
237 	if (rdev && !rdev_need_serial(rdev) &&
238 	    !test_bit(CollisionCheck, &rdev->flags))
239 		return;
240 
241 	if (!rdev)
242 		ret = rdevs_init_serial(mddev);
243 	else
244 		ret = rdev_init_serial(rdev);
245 	if (ret)
246 		return;
247 
248 	if (mddev->serial_info_pool == NULL) {
249 		/*
250 		 * already in memalloc noio context by
251 		 * mddev_suspend()
252 		 */
253 		mddev->serial_info_pool =
254 			mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
255 						sizeof(struct serial_info));
256 		if (!mddev->serial_info_pool) {
257 			rdevs_uninit_serial(mddev);
258 			pr_err("can't alloc memory pool for serialization\n");
259 		}
260 	}
261 }
262 
263 /*
264  * Free resources from rdev(s), and destroy serial_info_pool under these conditions:
265  * 1. rdev is the last device flagged with CollisionCheck.
266  * 2. the bitmap is destroyed while the policy is not enabled.
267  * 3. when disabling the policy, the pool is destroyed only when no rdev needs it.
268  */
269 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev)
270 {
271 	if (rdev && !test_bit(CollisionCheck, &rdev->flags))
272 		return;
273 
274 	if (mddev->serial_info_pool) {
275 		struct md_rdev *temp;
276 		int num = 0; /* used to track if other rdevs need the pool */
277 
278 		rdev_for_each(temp, mddev) {
279 			if (!rdev) {
280 				if (!test_bit(MD_SERIALIZE_POLICY,
281 					      &mddev->flags) ||
282 				    !rdev_need_serial(temp))
283 					rdev_uninit_serial(temp);
284 				else
285 					num++;
286 			} else if (temp != rdev &&
287 				   test_bit(CollisionCheck, &temp->flags))
288 				num++;
289 		}
290 
291 		if (rdev)
292 			rdev_uninit_serial(rdev);
293 
294 		if (num)
295 			pr_info("The mempool could be used by other devices\n");
296 		else {
297 			mempool_destroy(mddev->serial_info_pool);
298 			mddev->serial_info_pool = NULL;
299 		}
300 	}
301 }
302 
303 static struct ctl_table_header *raid_table_header;
304 
305 static const struct ctl_table raid_table[] = {
306 	{
307 		.procname	= "speed_limit_min",
308 		.data		= &sysctl_speed_limit_min,
309 		.maxlen		= sizeof(int),
310 		.mode		= 0644,
311 		.proc_handler	= proc_dointvec,
312 	},
313 	{
314 		.procname	= "speed_limit_max",
315 		.data		= &sysctl_speed_limit_max,
316 		.maxlen		= sizeof(int),
317 		.mode		= 0644,
318 		.proc_handler	= proc_dointvec,
319 	},
320 	{
321 		.procname	= "sync_io_depth",
322 		.data		= &sysctl_sync_io_depth,
323 		.maxlen		= sizeof(int),
324 		.mode		= 0644,
325 		.proc_handler	= proc_dointvec,
326 	},
327 };
328 
329 static int start_readonly;
330 
331 /*
332  * The original mechanism for creating an md device is to create
333  * a device node in /dev and to open it.  This causes races with device-close.
334  * The preferred method is to write to the "new_array" module parameter,
335  * which avoids these races.
336  * Setting create_on_open to false disables the original mechanism
337  * so all the races disappear.
338  */
339 static bool create_on_open = true;
340 static bool legacy_async_del_gendisk = true;
341 static bool check_new_feature = true;
342 
343 /*
344  * We have a system wide 'event count' that is incremented
345  * on any 'interesting' event, and readers of /proc/mdstat
346  * can use 'poll' or 'select' to find out when the event
347  * count increases.
348  *
349  * Events are:
350  *  start array, stop array, error, add device, remove device,
351  *  start build, activate spare
352  */
353 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
354 static atomic_t md_event_count;
355 void md_new_event(void)
356 {
357 	atomic_inc(&md_event_count);
358 	wake_up(&md_event_waiters);
359 }
360 EXPORT_SYMBOL_GPL(md_new_event);
361 
362 /*
363  * Enables iteration over all existing md arrays.
364  * all_mddevs_lock protects this list.
365  */
366 static LIST_HEAD(all_mddevs);
367 static DEFINE_SPINLOCK(all_mddevs_lock);
368 
369 static bool is_md_suspended(struct mddev *mddev)
370 {
371 	return percpu_ref_is_dying(&mddev->active_io);
372 }
373 /* Rather than calling directly into the personality make_request function,
374  * IO requests come here first so that we can check if the device is
375  * being suspended pending a reconfiguration.
376  * We hold a refcount over the call to ->make_request.  By the time that
377  * call has finished, the bio has been linked into some internal structure
378  * and so is visible to ->quiesce(), so we don't need the refcount any more.
379  */
380 static bool is_suspended(struct mddev *mddev, struct bio *bio)
381 {
382 	if (is_md_suspended(mddev))
383 		return true;
384 	if (bio_data_dir(bio) != WRITE)
385 		return false;
386 	if (READ_ONCE(mddev->suspend_lo) >= READ_ONCE(mddev->suspend_hi))
387 		return false;
388 	if (bio->bi_iter.bi_sector >= READ_ONCE(mddev->suspend_hi))
389 		return false;
390 	if (bio_end_sector(bio) < READ_ONCE(mddev->suspend_lo))
391 		return false;
392 	return true;
393 }
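/*
 * Example of the range check above: with suspend_lo == 1000 and
 * suspend_hi == 2000, a WRITE covering sectors [1500, 1508) overlaps the
 * suspended range and must wait; a WRITE starting at sector 2048, or any
 * READ, proceeds immediately.
 */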
394 
395 bool md_handle_request(struct mddev *mddev, struct bio *bio)
396 {
397 check_suspended:
398 	if (is_suspended(mddev, bio)) {
399 		DEFINE_WAIT(__wait);
400 		/* Bail out if REQ_NOWAIT is set for the bio */
401 		if (bio->bi_opf & REQ_NOWAIT) {
402 			bio_wouldblock_error(bio);
403 			return true;
404 		}
405 		for (;;) {
406 			prepare_to_wait(&mddev->sb_wait, &__wait,
407 					TASK_UNINTERRUPTIBLE);
408 			if (!is_suspended(mddev, bio))
409 				break;
410 			schedule();
411 		}
412 		finish_wait(&mddev->sb_wait, &__wait);
413 	}
414 	if (!percpu_ref_tryget_live(&mddev->active_io))
415 		goto check_suspended;
416 
417 	if (!mddev->pers->make_request(mddev, bio)) {
418 		percpu_ref_put(&mddev->active_io);
419 		if (!mddev->gendisk && mddev->pers->prepare_suspend)
420 			return false;
421 		goto check_suspended;
422 	}
423 
424 	percpu_ref_put(&mddev->active_io);
425 	return true;
426 }
427 EXPORT_SYMBOL(md_handle_request);
428 
429 static void md_submit_bio(struct bio *bio)
430 {
431 	const int rw = bio_data_dir(bio);
432 	struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
433 
434 	if (mddev == NULL || mddev->pers == NULL) {
435 		bio_io_error(bio);
436 		return;
437 	}
438 
439 	if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
440 		bio_io_error(bio);
441 		return;
442 	}
443 
444 	bio = bio_split_to_limits(bio);
445 	if (!bio)
446 		return;
447 
448 	if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) {
449 		if (bio_sectors(bio) != 0)
450 			bio->bi_status = BLK_STS_IOERR;
451 		bio_endio(bio);
452 		return;
453 	}
454 
455 	/* bio could become mergeable after passing to the underlying layer */
456 	bio->bi_opf &= ~REQ_NOMERGE;
457 
458 	md_handle_request(mddev, bio);
459 }
460 
461 /*
462  * Make sure no new requests are submitted to the device, and any requests that
463  * have been submitted are completely handled.
464  */
465 int mddev_suspend(struct mddev *mddev, bool interruptible)
466 {
467 	int err = 0;
468 
469 	/*
470 	 * Holding reconfig_mutex while waiting for normal io to finish would
471 	 * deadlock, because no other context could update the super_block, and
472 	 * normal io can depend on the super_block being updated.
473 	 */
474 	lockdep_assert_not_held(&mddev->reconfig_mutex);
475 
476 	if (interruptible)
477 		err = mutex_lock_interruptible(&mddev->suspend_mutex);
478 	else
479 		mutex_lock(&mddev->suspend_mutex);
480 	if (err)
481 		return err;
482 
483 	if (mddev->suspended) {
484 		WRITE_ONCE(mddev->suspended, mddev->suspended + 1);
485 		mutex_unlock(&mddev->suspend_mutex);
486 		return 0;
487 	}
488 
489 	percpu_ref_kill(&mddev->active_io);
490 
491 	/*
492 	 * RAID456 IO can sleep in wait_for_reshape while still holding an
493 	 * active_io reference. If reshape is already interrupted or frozen,
494 	 * wake those waiters so they can abort and drop the reference instead
495 	 * of deadlocking suspend.
496 	 */
497 	if (mddev->pers && mddev->pers->prepare_suspend &&
498 	    reshape_interrupted(mddev))
499 		mddev->pers->prepare_suspend(mddev);
500 
501 	if (interruptible)
502 		err = wait_event_interruptible(mddev->sb_wait,
503 				percpu_ref_is_zero(&mddev->active_io));
504 	else
505 		wait_event(mddev->sb_wait,
506 				percpu_ref_is_zero(&mddev->active_io));
507 	if (err) {
508 		percpu_ref_resurrect(&mddev->active_io);
509 		mutex_unlock(&mddev->suspend_mutex);
510 		return err;
511 	}
512 
513 	/*
514 	 * For raid456, io might be waiting for reshape to make progress;
515 	 * allow a new reshape to start while waiting for io to complete,
516 	 * to prevent deadlock.
517 	 */
518 	WRITE_ONCE(mddev->suspended, mddev->suspended + 1);
519 
520 	/* restrict memory reclaim I/O while the raid array is suspended */
521 	mddev->noio_flag = memalloc_noio_save();
522 
523 	mutex_unlock(&mddev->suspend_mutex);
524 	return 0;
525 }
526 EXPORT_SYMBOL_GPL(mddev_suspend);
527 
528 static void __mddev_resume(struct mddev *mddev, bool recovery_needed)
529 {
530 	lockdep_assert_not_held(&mddev->reconfig_mutex);
531 
532 	mutex_lock(&mddev->suspend_mutex);
533 	WRITE_ONCE(mddev->suspended, mddev->suspended - 1);
534 	if (mddev->suspended) {
535 		mutex_unlock(&mddev->suspend_mutex);
536 		return;
537 	}
538 
539 	/* exit the memalloc noio scope entered in mddev_suspend() */
540 	memalloc_noio_restore(mddev->noio_flag);
541 
542 	percpu_ref_resurrect(&mddev->active_io);
543 	wake_up(&mddev->sb_wait);
544 
545 	if (recovery_needed)
546 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
547 	md_wakeup_thread(mddev->thread);
548 	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
549 
550 	mutex_unlock(&mddev->suspend_mutex);
551 }
552 
553 void mddev_resume(struct mddev *mddev)
554 {
555 	return __mddev_resume(mddev, true);
556 }
557 EXPORT_SYMBOL_GPL(mddev_resume);
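/*
 * Sketched caller pattern (illustrative, not one specific md.c call site):
 * a reconfiguration path brackets its changes with mddev_suspend() and
 * mddev_resume() so that no normal IO is in flight while it works:
 *
 *	err = mddev_suspend(mddev, true);
 *	if (err)
 *		return err;
 *	... reconfigure; active_io is fully drained here ...
 *	mddev_resume(mddev);
 */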
558 
559 /* sync the bdev before setting the device to read-only or stopping the array */
560 static int mddev_set_closing_and_sync_blockdev(struct mddev *mddev, int opener_num)
561 {
562 	mutex_lock(&mddev->open_mutex);
563 	if (mddev->pers && atomic_read(&mddev->openers) > opener_num) {
564 		mutex_unlock(&mddev->open_mutex);
565 		return -EBUSY;
566 	}
567 	if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
568 		mutex_unlock(&mddev->open_mutex);
569 		return -EBUSY;
570 	}
571 	mutex_unlock(&mddev->open_mutex);
572 
573 	sync_blockdev(mddev->gendisk->part0);
574 	return 0;
575 }
576 
577 /*
578  * The only difference from bio_chain_endio() is that the current
579  * bi_status of bio does not affect the bi_status of parent.
580  */
581 static void md_end_flush(struct bio *bio)
582 {
583 	struct bio *parent = bio->bi_private;
584 
585 	/*
586 	 * If any flush io fails before a power failure,
587 	 * disk data may be lost.
588 	 */
589 	if (bio->bi_status)
590 		pr_err("md: %pg flush io error %d\n", bio->bi_bdev,
591 			blk_status_to_errno(bio->bi_status));
592 
593 	bio_put(bio);
594 	bio_endio(parent);
595 }
596 
597 bool md_flush_request(struct mddev *mddev, struct bio *bio)
598 {
599 	struct md_rdev *rdev;
600 	struct bio *new;
601 
602 	/*
603 	 * md_flush_request() should be called from md_handle_request(), where
604 	 * 'active_io' is already grabbed. Hence it's safe to access rdev directly
605 	 * without rcu protection.
606 	 */
607 	WARN_ON(percpu_ref_is_zero(&mddev->active_io));
608 
609 	rdev_for_each(rdev, mddev) {
610 		if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags))
611 			continue;
612 
613 		new = bio_alloc_bioset(rdev->bdev, 0,
614 				       REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO,
615 				       &mddev->bio_set);
616 		new->bi_private = bio;
617 		new->bi_end_io = md_end_flush;
618 		bio_inc_remaining(bio);
619 		submit_bio(new);
620 	}
621 
622 	if (bio_sectors(bio) == 0) {
623 		bio_endio(bio);
624 		return true;
625 	}
626 
627 	bio->bi_opf &= ~REQ_PREFLUSH;
628 	return false;
629 }
630 EXPORT_SYMBOL(md_flush_request);
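/*
 * Expected caller pattern (a sketch; raid personalities use something close
 * to this at the top of their ->make_request method):
 *
 *	if (unlikely(bio->bi_opf & REQ_PREFLUSH) &&
 *	    md_flush_request(mddev, bio))
 *		return true;
 *
 * A true return means the flush was fully handled here. A false return means
 * REQ_PREFLUSH has been stripped and the data payload of the bio must still
 * be processed normally.
 */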
631 
632 static inline struct mddev *mddev_get(struct mddev *mddev)
633 {
634 	lockdep_assert_held(&all_mddevs_lock);
635 
636 	if (test_bit(MD_DELETED, &mddev->flags))
637 		return NULL;
638 	atomic_inc(&mddev->active);
639 	return mddev;
640 }
641 
642 static void mddev_delayed_delete(struct work_struct *ws);
643 
644 static void __mddev_put(struct mddev *mddev)
645 {
646 	if (mddev->raid_disks || !list_empty(&mddev->disks) ||
647 	    mddev->ctime || mddev->hold_active)
648 		return;
649 
650 	/*
651 	 * If the array is freed by stopping it, MD_DELETED is set by
652 	 * do_md_stop(). MD_DELETED is still set here in case the mddev is freed
653 	 * directly by closing an mddev that was created by create_on_open.
654 	 */
655 	set_bit(MD_DELETED, &mddev->flags);
656 	/*
657 	 * Call queue_work inside the spinlock so that flush_workqueue() after
658 	 * mddev_find will succeed in waiting for the work to be done.
659 	 */
660 	queue_work(md_misc_wq, &mddev->del_work);
661 }
662 
663 static void mddev_put_locked(struct mddev *mddev)
664 {
665 	if (atomic_dec_and_test(&mddev->active))
666 		__mddev_put(mddev);
667 }
668 
669 void mddev_put(struct mddev *mddev)
670 {
671 	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
672 		return;
673 
674 	__mddev_put(mddev);
675 	spin_unlock(&all_mddevs_lock);
676 }
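/*
 * Sketched lookup pattern (illustrative): a reference is taken under
 * all_mddevs_lock via mddev_get(), which fails for arrays already marked
 * MD_DELETED, and is dropped with mddev_put() when done:
 *
 *	spin_lock(&all_mddevs_lock);
 *	mddev = mddev_find_locked(unit);
 *	if (mddev)
 *		mddev = mddev_get(mddev);
 *	spin_unlock(&all_mddevs_lock);
 *	if (mddev) {
 *		... use mddev ...
 *		mddev_put(mddev);
 *	}
 */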
677 
678 static void md_safemode_timeout(struct timer_list *t);
679 static void md_start_sync(struct work_struct *ws);
680 
681 static void active_io_release(struct percpu_ref *ref)
682 {
683 	struct mddev *mddev = container_of(ref, struct mddev, active_io);
684 
685 	wake_up(&mddev->sb_wait);
686 }
687 
688 static void no_op(struct percpu_ref *r) {}
689 
690 static bool mddev_set_bitmap_ops(struct mddev *mddev)
691 {
692 	struct bitmap_operations *old = mddev->bitmap_ops;
693 	struct md_submodule_head *head;
694 
695 	if (mddev->bitmap_id == ID_BITMAP_NONE ||
696 	    (old && old->head.id == mddev->bitmap_id))
697 		return true;
698 
699 	xa_lock(&md_submodule);
700 	head = xa_load(&md_submodule, mddev->bitmap_id);
701 
702 	if (!head) {
703 		pr_warn("md: can't find bitmap id %d\n", mddev->bitmap_id);
704 		goto err;
705 	}
706 
707 	if (head->type != MD_BITMAP) {
708 		pr_warn("md: invalid bitmap id %d\n", mddev->bitmap_id);
709 		goto err;
710 	}
711 
712 	mddev->bitmap_ops = (void *)head;
713 	xa_unlock(&md_submodule);
714 
715 	if (!mddev_is_dm(mddev) && mddev->bitmap_ops->group) {
716 		if (sysfs_create_group(&mddev->kobj, mddev->bitmap_ops->group))
717 			pr_warn("md: cannot register extra bitmap attributes for %s\n",
718 				mdname(mddev));
719 		else
720 			/*
721 			 * Inform user with KOBJ_CHANGE about new bitmap
722 			 * attributes.
723 			 */
724 			kobject_uevent(&mddev->kobj, KOBJ_CHANGE);
725 	}
726 	return true;
727 
728 err:
729 	xa_unlock(&md_submodule);
730 	return false;
731 }
732 
733 static void mddev_clear_bitmap_ops(struct mddev *mddev)
734 {
735 	if (!mddev_is_dm(mddev) && mddev->bitmap_ops &&
736 	    mddev->bitmap_ops->group)
737 		sysfs_remove_group(&mddev->kobj, mddev->bitmap_ops->group);
738 
739 	mddev->bitmap_ops = NULL;
740 }
741 
742 int mddev_init(struct mddev *mddev)
743 {
744 	int err = 0;
745 
746 	if (!IS_ENABLED(CONFIG_MD_BITMAP))
747 		mddev->bitmap_id = ID_BITMAP_NONE;
748 	else
749 		mddev->bitmap_id = ID_BITMAP;
750 
751 	if (percpu_ref_init(&mddev->active_io, active_io_release,
752 			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
753 		return -ENOMEM;
754 
755 	if (percpu_ref_init(&mddev->writes_pending, no_op,
756 			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
757 		err = -ENOMEM;
758 		goto exit_active_io;
759 	}
760 
761 	err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
762 	if (err)
763 		goto exit_writes_pending;
764 
765 	err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
766 	if (err)
767 		goto exit_bio_set;
768 
769 	err = bioset_init(&mddev->io_clone_set, BIO_POOL_SIZE,
770 			  offsetof(struct md_io_clone, bio_clone), 0);
771 	if (err)
772 		goto exit_sync_set;
773 
774 	/* We want to start with the refcount at zero */
775 	percpu_ref_put(&mddev->writes_pending);
776 
777 	mutex_init(&mddev->open_mutex);
778 	mutex_init(&mddev->reconfig_mutex);
779 	mutex_init(&mddev->suspend_mutex);
780 	mutex_init(&mddev->bitmap_info.mutex);
781 	INIT_LIST_HEAD(&mddev->disks);
782 	INIT_LIST_HEAD(&mddev->all_mddevs);
783 	INIT_LIST_HEAD(&mddev->deleting);
784 	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
785 	atomic_set(&mddev->active, 1);
786 	atomic_set(&mddev->openers, 0);
787 	atomic_set(&mddev->sync_seq, 0);
788 	spin_lock_init(&mddev->lock);
789 	init_waitqueue_head(&mddev->sb_wait);
790 	init_waitqueue_head(&mddev->recovery_wait);
791 	mddev->reshape_position = MaxSector;
792 	mddev->reshape_backwards = 0;
793 	mddev->last_sync_action = ACTION_IDLE;
794 	mddev->resync_min = 0;
795 	mddev->resync_max = MaxSector;
796 	mddev->level = LEVEL_NONE;
797 
798 	INIT_WORK(&mddev->sync_work, md_start_sync);
799 	INIT_WORK(&mddev->del_work, mddev_delayed_delete);
800 
801 	return 0;
802 
803 exit_sync_set:
804 	bioset_exit(&mddev->sync_set);
805 exit_bio_set:
806 	bioset_exit(&mddev->bio_set);
807 exit_writes_pending:
808 	percpu_ref_exit(&mddev->writes_pending);
809 exit_active_io:
810 	percpu_ref_exit(&mddev->active_io);
811 	return err;
812 }
813 EXPORT_SYMBOL_GPL(mddev_init);
814 
815 void mddev_destroy(struct mddev *mddev)
816 {
817 	bioset_exit(&mddev->bio_set);
818 	bioset_exit(&mddev->sync_set);
819 	bioset_exit(&mddev->io_clone_set);
820 	percpu_ref_exit(&mddev->active_io);
821 	percpu_ref_exit(&mddev->writes_pending);
822 }
823 EXPORT_SYMBOL_GPL(mddev_destroy);
824 
825 static struct mddev *mddev_find_locked(dev_t unit)
826 {
827 	struct mddev *mddev;
828 
829 	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
830 		if (mddev->unit == unit)
831 			return mddev;
832 
833 	return NULL;
834 }
835 
836 /* find an unused unit number */
837 static dev_t mddev_alloc_unit(void)
838 {
839 	static int next_minor = 512;
840 	int start = next_minor;
841 	bool is_free = false;
842 	dev_t dev = 0;
843 
844 	while (!is_free) {
845 		dev = MKDEV(MD_MAJOR, next_minor);
846 		next_minor++;
847 		if (next_minor > MINORMASK)
848 			next_minor = 0;
849 		if (next_minor == start)
850 			return 0;		/* Oh dear, all in use. */
851 		is_free = !mddev_find_locked(dev);
852 	}
853 
854 	return dev;
855 }
856 
857 static struct mddev *mddev_alloc(dev_t unit)
858 {
859 	struct mddev *new;
860 	int error;
861 
862 	if (unit && MAJOR(unit) != MD_MAJOR)
863 		unit &= ~((1 << MdpMinorShift) - 1);
864 
865 	new = kzalloc_obj(*new);
866 	if (!new)
867 		return ERR_PTR(-ENOMEM);
868 
869 	error = mddev_init(new);
870 	if (error)
871 		goto out_free_new;
872 
873 	spin_lock(&all_mddevs_lock);
874 	if (unit) {
875 		error = -EEXIST;
876 		if (mddev_find_locked(unit))
877 			goto out_destroy_new;
878 		new->unit = unit;
879 		if (MAJOR(unit) == MD_MAJOR)
880 			new->md_minor = MINOR(unit);
881 		else
882 			new->md_minor = MINOR(unit) >> MdpMinorShift;
883 		new->hold_active = UNTIL_IOCTL;
884 	} else {
885 		error = -ENODEV;
886 		new->unit = mddev_alloc_unit();
887 		if (!new->unit)
888 			goto out_destroy_new;
889 		new->md_minor = MINOR(new->unit);
890 		new->hold_active = UNTIL_STOP;
891 	}
892 
893 	list_add(&new->all_mddevs, &all_mddevs);
894 	spin_unlock(&all_mddevs_lock);
895 	return new;
896 
897 out_destroy_new:
898 	spin_unlock(&all_mddevs_lock);
899 	mddev_destroy(new);
900 out_free_new:
901 	kfree(new);
902 	return ERR_PTR(error);
903 }
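/*
 * Worked example of the unit masking above, assuming MdpMinorShift == 6
 * (its value in md_u.h): a partitionable "mdp" array owns a block of
 * 1 << 6 == 64 minors, so "unit &= ~63" rounds a partition's minor down to
 * the whole-array device, and "MINOR(unit) >> MdpMinorShift" recovers the
 * array index.
 */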
904 
905 static void mddev_free(struct mddev *mddev)
906 {
907 	spin_lock(&all_mddevs_lock);
908 	list_del(&mddev->all_mddevs);
909 	spin_unlock(&all_mddevs_lock);
910 
911 	mddev_destroy(mddev);
912 	kfree(mddev);
913 }
914 
915 static const struct attribute_group md_redundancy_group;
916 
917 void mddev_unlock(struct mddev *mddev)
918 {
919 	struct md_rdev *rdev;
920 	struct md_rdev *tmp;
921 	LIST_HEAD(delete);
922 
923 	if (!list_empty(&mddev->deleting))
924 		list_splice_init(&mddev->deleting, &delete);
925 
926 	if (mddev->to_remove) {
927 		/* These cannot be removed under reconfig_mutex as
928 		 * an access to the files will try to take reconfig_mutex
929 		 * while holding the file unremovable, which leads to
930 		 * a deadlock.
931 		 * So set sysfs_active while the removal is happening,
932 		 * and anything else which might set ->to_remove or may
933 		 * otherwise change the sysfs namespace will fail with
934 		 * -EBUSY if sysfs_active is still set.
935 		 * We set sysfs_active under reconfig_mutex and elsewhere
936 		 * test it under the same mutex to ensure its correct value
937 		 * is seen.
938 		 */
939 		const struct attribute_group *to_remove = mddev->to_remove;
940 		mddev->to_remove = NULL;
941 		mddev->sysfs_active = 1;
942 		mutex_unlock(&mddev->reconfig_mutex);
943 
944 		if (mddev->kobj.sd) {
945 			if (to_remove != &md_redundancy_group)
946 				sysfs_remove_group(&mddev->kobj, to_remove);
947 			if (mddev->pers == NULL ||
948 			    mddev->pers->sync_request == NULL) {
949 				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
950 				if (mddev->sysfs_action)
951 					sysfs_put(mddev->sysfs_action);
952 				if (mddev->sysfs_completed)
953 					sysfs_put(mddev->sysfs_completed);
954 				if (mddev->sysfs_degraded)
955 					sysfs_put(mddev->sysfs_degraded);
956 				mddev->sysfs_action = NULL;
957 				mddev->sysfs_completed = NULL;
958 				mddev->sysfs_degraded = NULL;
959 			}
960 		}
961 		mddev->sysfs_active = 0;
962 	} else
963 		mutex_unlock(&mddev->reconfig_mutex);
964 
965 	md_wakeup_thread(mddev->thread);
966 	wake_up(&mddev->sb_wait);
967 
968 	list_for_each_entry_safe(rdev, tmp, &delete, same_set) {
969 		list_del_init(&rdev->same_set);
970 		kobject_del(&rdev->kobj);
971 		export_rdev(rdev);
972 	}
973 
974 	if (!legacy_async_del_gendisk) {
975 		/*
976 		 * Call del_gendisk after releasing reconfig_mutex to avoid
977 		 * deadlock (e.g. calling del_gendisk under the lock while an
978 		 * access to sysfs files waits for the lock).
979 		 * MD_DELETED is only used for md raid, where it is set in
980 		 * do_md_stop(); dm raid only uses md_stop() to stop, so dm raid
981 		 * doesn't need to check MD_DELETED when taking the reconfig lock.
982 		 */
983 		if (test_bit(MD_DELETED, &mddev->flags) &&
984 		    !test_and_set_bit(MD_DO_DELETE, &mddev->flags)) {
985 			kobject_del(&mddev->kobj);
986 			del_gendisk(mddev->gendisk);
987 		}
988 	}
989 }
990 EXPORT_SYMBOL_GPL(mddev_unlock);
991 
992 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
993 {
994 	struct md_rdev *rdev;
995 
996 	rdev_for_each_rcu(rdev, mddev)
997 		if (rdev->desc_nr == nr)
998 			return rdev;
999 
1000 	return NULL;
1001 }
1002 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
1003 
1004 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
1005 {
1006 	struct md_rdev *rdev;
1007 
1008 	rdev_for_each(rdev, mddev)
1009 		if (rdev->bdev->bd_dev == dev)
1010 			return rdev;
1011 
1012 	return NULL;
1013 }
1014 
1015 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
1016 {
1017 	struct md_rdev *rdev;
1018 
1019 	rdev_for_each_rcu(rdev, mddev)
1020 		if (rdev->bdev->bd_dev == dev)
1021 			return rdev;
1022 
1023 	return NULL;
1024 }
1025 EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
1026 
1027 static struct md_personality *get_pers(int level, char *clevel)
1028 {
1029 	struct md_personality *ret = NULL;
1030 	struct md_submodule_head *head;
1031 	unsigned long i;
1032 
1033 	xa_lock(&md_submodule);
1034 	xa_for_each(&md_submodule, i, head) {
1035 		if (head->type != MD_PERSONALITY)
1036 			continue;
1037 		if ((level != LEVEL_NONE && head->id == level) ||
1038 		    !strcmp(head->name, clevel)) {
1039 			if (try_module_get(head->owner))
1040 				ret = (void *)head;
1041 			break;
1042 		}
1043 	}
1044 	xa_unlock(&md_submodule);
1045 
1046 	if (!ret) {
1047 		if (level != LEVEL_NONE)
1048 			pr_warn("md: personality for level %d is not loaded!\n",
1049 				level);
1050 		else
1051 			pr_warn("md: personality for level %s is not loaded!\n",
1052 				clevel);
1053 	}
1054 
1055 	return ret;
1056 }
1057 
1058 static void put_pers(struct md_personality *pers)
1059 {
1060 	module_put(pers->head.owner);
1061 }
1062 
1063 /* return the offset of the super block in 512-byte sectors */
1064 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
1065 {
1066 	return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
1067 }
1068 
1069 static int alloc_disk_sb(struct md_rdev *rdev)
1070 {
1071 	rdev->sb_page = alloc_page(GFP_KERNEL);
1072 	if (!rdev->sb_page)
1073 		return -ENOMEM;
1074 	return 0;
1075 }
1076 
1077 void md_rdev_clear(struct md_rdev *rdev)
1078 {
1079 	if (rdev->sb_page) {
1080 		put_page(rdev->sb_page);
1081 		rdev->sb_loaded = 0;
1082 		rdev->sb_page = NULL;
1083 		rdev->sb_start = 0;
1084 		rdev->sectors = 0;
1085 	}
1086 	if (rdev->bb_page) {
1087 		put_page(rdev->bb_page);
1088 		rdev->bb_page = NULL;
1089 	}
1090 	badblocks_exit(&rdev->badblocks);
1091 }
1092 EXPORT_SYMBOL_GPL(md_rdev_clear);
1093 
1094 static void super_written(struct bio *bio)
1095 {
1096 	struct md_rdev *rdev = bio->bi_private;
1097 	struct mddev *mddev = rdev->mddev;
1098 
1099 	if (bio->bi_status) {
1100 		pr_err("md: %s gets error=%d\n", __func__,
1101 		       blk_status_to_errno(bio->bi_status));
1102 		md_error(mddev, rdev);
1103 		if (!test_bit(Faulty, &rdev->flags)
1104 		    && (bio->bi_opf & MD_FAILFAST)) {
1105 			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
1106 			set_bit(LastDev, &rdev->flags);
1107 		}
1108 	} else
1109 		clear_bit(LastDev, &rdev->flags);
1110 
1111 	bio_put(bio);
1112 
1113 	rdev_dec_pending(rdev, mddev);
1114 
1115 	if (atomic_dec_and_test(&mddev->pending_writes))
1116 		wake_up(&mddev->sb_wait);
1117 }
1118 
1119 /**
1120  * md_write_metadata - write metadata to underlying disk, including
1121  * array superblock, badblocks, bitmap superblock and bitmap bits.
1122  * @mddev:	the array to write
1123  * @rdev:	the underlying disk to write
1124  * @sector:	the offset to @rdev
1125  * @size:	the length of the metadata
1126  * @page:	the metadata
1127  * @offset:	the offset to @page
1128  *
1129  * Write @size bytes of @page, starting at @offset, to @sector of @rdev. This
1130  * increments mddev->pending_writes before returning, and decrements it on
1131  * completion, waking up sb_wait. The caller must call md_super_wait() after
1132  * issuing io to all rdevs. If an error occurs, md_error() will be called, and
1133  * @rdev will be kicked out of @mddev.
1134  */
1135 void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev,
1136 		       sector_t sector, int size, struct page *page,
1137 		       unsigned int offset)
1138 {
1139 	struct bio *bio;
1140 
1141 	if (!page)
1142 		return;
1143 
1144 	if (test_bit(Faulty, &rdev->flags))
1145 		return;
1146 
1147 	bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
1148 			      1,
1149 			      REQ_OP_WRITE | REQ_SYNC | REQ_IDLE | REQ_META
1150 				  | REQ_PREFLUSH | REQ_FUA,
1151 			      GFP_NOIO, &mddev->sync_set);
1152 
1153 	atomic_inc(&rdev->nr_pending);
1154 
1155 	bio->bi_iter.bi_sector = sector;
1156 	__bio_add_page(bio, page, size, offset);
1157 	bio->bi_private = rdev;
1158 	bio->bi_end_io = super_written;
1159 
1160 	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
1161 	    test_bit(FailFast, &rdev->flags) &&
1162 	    !test_bit(LastDev, &rdev->flags))
1163 		bio->bi_opf |= MD_FAILFAST;
1164 
1165 	atomic_inc(&mddev->pending_writes);
1166 	submit_bio(bio);
1167 }
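/*
 * Sketched usage following the contract above (super_90_rdev_size_change()
 * further down is a real caller): issue the write, then wait, retrying while
 * md_super_wait() reports that a failfast write must be re-issued:
 *
 *	do {
 *		md_write_metadata(mddev, rdev, rdev->sb_start,
 *				  rdev->sb_size, rdev->sb_page, 0);
 *	} while (md_super_wait(mddev) < 0);
 */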
1168 
1169 int md_super_wait(struct mddev *mddev)
1170 {
1171 	/* wait for all superblock writes that were scheduled to complete */
1172 	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
1173 	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
1174 		return -EAGAIN;
1175 	return 0;
1176 }
1177 
1178 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
1179 		 struct page *page, blk_opf_t opf, bool metadata_op)
1180 {
1181 	struct bio bio;
1182 	struct bio_vec bvec;
1183 
1184 	if (metadata_op && rdev->meta_bdev)
1185 		bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf);
1186 	else
1187 		bio_init(&bio, rdev->bdev, &bvec, 1, opf);
1188 
1189 	if (metadata_op)
1190 		bio.bi_iter.bi_sector = sector + rdev->sb_start;
1191 	else if (rdev->mddev->reshape_position != MaxSector &&
1192 		 (rdev->mddev->reshape_backwards ==
1193 		  (sector >= rdev->mddev->reshape_position)))
1194 		bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
1195 	else
1196 		bio.bi_iter.bi_sector = sector + rdev->data_offset;
1197 	__bio_add_page(&bio, page, size, 0);
1198 
1199 	submit_bio_wait(&bio);
1200 
1201 	return !bio.bi_status;
1202 }
1203 EXPORT_SYMBOL_GPL(sync_page_io);
1204 
1205 static int read_disk_sb(struct md_rdev *rdev, int size)
1206 {
1207 	if (rdev->sb_loaded)
1208 		return 0;
1209 
1210 	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true))
1211 		goto fail;
1212 	rdev->sb_loaded = 1;
1213 	return 0;
1214 
1215 fail:
1216 	pr_err("md: disabled device %pg, could not read superblock.\n",
1217 	       rdev->bdev);
1218 	return -EINVAL;
1219 }
1220 
1221 static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
1222 {
1223 	return	sb1->set_uuid0 == sb2->set_uuid0 &&
1224 		sb1->set_uuid1 == sb2->set_uuid1 &&
1225 		sb1->set_uuid2 == sb2->set_uuid2 &&
1226 		sb1->set_uuid3 == sb2->set_uuid3;
1227 }
1228 
1229 static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
1230 {
1231 	int ret;
1232 	mdp_super_t *tmp1, *tmp2;
1233 
1234 	tmp1 = kmalloc_obj(*tmp1);
1235 	tmp2 = kmalloc_obj(*tmp2);
1236 
1237 	if (!tmp1 || !tmp2) {
1238 		ret = 0;
1239 		goto abort;
1240 	}
1241 
1242 	*tmp1 = *sb1;
1243 	*tmp2 = *sb2;
1244 
1245 	/*
1246 	 * nr_disks is not constant
1247 	 */
1248 	tmp1->nr_disks = 0;
1249 	tmp2->nr_disks = 0;
1250 
1251 	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
1252 abort:
1253 	kfree(tmp1);
1254 	kfree(tmp2);
1255 	return ret;
1256 }
1257 
1258 static u32 md_csum_fold(u32 csum)
1259 {
1260 	csum = (csum & 0xffff) + (csum >> 16);
1261 	return (csum & 0xffff) + (csum >> 16);
1262 }
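/*
 * Worked example of the end-around-carry fold above:
 * 0xffff0001 -> 0x0001 + 0xffff = 0x10000 -> 0x0000 + 0x0001 = 0x1.
 * Two passes reduce any 32-bit value to 16 bits, which is why
 * super_90_load() below folds both sides before comparing checksums.
 */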
1263 
1264 static unsigned int calc_sb_csum(mdp_super_t *sb)
1265 {
1266 	u64 newcsum = 0;
1267 	u32 *sb32 = (u32*)sb;
1268 	int i;
1269 	unsigned int disk_csum, csum;
1270 
1271 	disk_csum = sb->sb_csum;
1272 	sb->sb_csum = 0;
1273 
1274 	for (i = 0; i < MD_SB_BYTES/4 ; i++)
1275 		newcsum += sb32[i];
1276 	csum = (newcsum & 0xffffffff) + (newcsum>>32);
1277 
1278 #ifdef CONFIG_ALPHA
1279 	/* This used to use csum_partial, which was wrong for several
1280 	 * reasons including that different results are returned on
1281 	 * different architectures.  It isn't critical that we get exactly
1282 	 * the same return value as before (we always csum_fold before
1283 	 * testing, and that removes any differences).  However as we
1284 	 * know that csum_partial always returned a 16bit value on
1285 	 * alphas, do a fold to maximise conformity to previous behaviour.
1286 	 */
1287 	sb->sb_csum = md_csum_fold(disk_csum);
1288 #else
1289 	sb->sb_csum = disk_csum;
1290 #endif
1291 	return csum;
1292 }
1293 
1294 /*
1295  * Handle superblock details.
1296  * We want to be able to handle multiple superblock formats
1297  * so we have a common interface to them all, and an array of
1298  * different handlers.
1299  * We rely on user-space to write the initial superblock, and support
1300  * reading and updating of superblocks.
1301  * Interface methods are:
1302  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
1303  *      loads and validates a superblock on dev.
1304  *      if refdev != NULL, compare superblocks on both devices
1305  *    Return:
1306  *      0 - dev has a superblock that is compatible with refdev
1307  *      1 - dev has a superblock that is compatible and newer than refdev
1308  *          so dev should be used as the refdev in future
1309  *     -EINVAL superblock incompatible or invalid
1310  *     -othererror e.g. -EIO
1311  *
1312  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
1313  *      Verify that dev is acceptable into mddev.
1314  *       The first time, mddev->raid_disks will be 0, and data from
1315  *       dev should be merged in.  Subsequent calls check that dev
1316  *       is new enough.  Return 0 or -EINVAL
1317  *
1318  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
1319  *     Update the superblock for rdev with data in mddev
1320  *     This does not write to disc.
1321  *
1322  */
1323 
1324 struct super_type  {
1325 	char		    *name;
1326 	struct module	    *owner;
1327 	int		    (*load_super)(struct md_rdev *rdev,
1328 					  struct md_rdev *refdev,
1329 					  int minor_version);
1330 	int		    (*validate_super)(struct mddev *mddev,
1331 					      struct md_rdev *freshest,
1332 					      struct md_rdev *rdev);
1333 	void		    (*sync_super)(struct mddev *mddev,
1334 					  struct md_rdev *rdev);
1335 	unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
1336 						sector_t num_sectors);
1337 	int		    (*allow_new_offset)(struct md_rdev *rdev,
1338 						unsigned long long new_offset);
1339 };
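/*
 * For orientation, a sketch of how a handler entry is wired up (the real
 * super_types[] table appears later in this file, roughly along these lines
 * for the 0.90.0 format):
 *
 *	[0] = {
 *		.name		  = "0.90.0",
 *		.owner		  = THIS_MODULE,
 *		.load_super	  = super_90_load,
 *		.validate_super	  = super_90_validate,
 *		.sync_super	  = super_90_sync,
 *		.rdev_size_change = super_90_rdev_size_change,
 *		.allow_new_offset = super_90_allow_new_offset,
 *	},
 */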
1340 
1341 /*
1342  * Check that the given mddev has no bitmap.
1343  *
1344  * This function is called from the run method of all personalities that do not
1345  * support bitmaps. It prints an error message and returns non-zero if mddev
1346  * has a bitmap. Otherwise, it returns 0.
1347  *
1348  */
1349 int md_check_no_bitmap(struct mddev *mddev)
1350 {
1351 	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
1352 		return 0;
1353 	pr_warn("%s: bitmaps are not supported for %s\n",
1354 		mdname(mddev), mddev->pers->head.name);
1355 	return 1;
1356 }
1357 EXPORT_SYMBOL(md_check_no_bitmap);
1358 
1359 /*
1360  * load_super for 0.90.0
1361  */
1362 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1363 {
1364 	mdp_super_t *sb;
1365 	int ret;
1366 	bool spare_disk = true;
1367 
1368 	/*
1369 	 * Calculate the position of the superblock (in 512-byte sectors);
1370 	 * it's at the end of the disk.
1371 	 *
1372 	 * It also happens to be a multiple of 4Kb.
1373 	 */
1374 	rdev->sb_start = calc_dev_sboffset(rdev);
1375 
1376 	ret = read_disk_sb(rdev, MD_SB_BYTES);
1377 	if (ret)
1378 		return ret;
1379 
1380 	ret = -EINVAL;
1381 
1382 	sb = page_address(rdev->sb_page);
1383 
1384 	if (sb->md_magic != MD_SB_MAGIC) {
1385 		pr_warn("md: invalid raid superblock magic on %pg\n",
1386 			rdev->bdev);
1387 		goto abort;
1388 	}
1389 
1390 	if (sb->major_version != 0 ||
1391 	    sb->minor_version < 90 ||
1392 	    sb->minor_version > 91) {
1393 		pr_warn("Bad version number %d.%d on %pg\n",
1394 			sb->major_version, sb->minor_version, rdev->bdev);
1395 		goto abort;
1396 	}
1397 
1398 	if (sb->raid_disks <= 0)
1399 		goto abort;
1400 
1401 	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
1402 		pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev);
1403 		goto abort;
1404 	}
1405 
1406 	rdev->preferred_minor = sb->md_minor;
1407 	rdev->data_offset = 0;
1408 	rdev->new_data_offset = 0;
1409 	rdev->sb_size = MD_SB_BYTES;
1410 	rdev->badblocks.shift = -1;
1411 
1412 	rdev->desc_nr = sb->this_disk.number;
1413 
1414 	/* not spare disk */
1415 	if (rdev->desc_nr >= 0 && rdev->desc_nr < MD_SB_DISKS &&
1416 	    sb->disks[rdev->desc_nr].state & ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1417 		spare_disk = false;
1418 
1419 	if (!refdev) {
1420 		if (!spare_disk)
1421 			ret = 1;
1422 		else
1423 			ret = 0;
1424 	} else {
1425 		__u64 ev1, ev2;
1426 		mdp_super_t *refsb = page_address(refdev->sb_page);
1427 		if (!md_uuid_equal(refsb, sb)) {
1428 			pr_warn("md: %pg has different UUID to %pg\n",
1429 				rdev->bdev, refdev->bdev);
1430 			goto abort;
1431 		}
1432 		if (!md_sb_equal(refsb, sb)) {
1433 			pr_warn("md: %pg has same UUID but different superblock to %pg\n",
1434 				rdev->bdev, refdev->bdev);
1435 			goto abort;
1436 		}
1437 		ev1 = md_event(sb);
1438 		ev2 = md_event(refsb);
1439 
1440 		if (!spare_disk && ev1 > ev2)
1441 			ret = 1;
1442 		else
1443 			ret = 0;
1444 	}
1445 	rdev->sectors = rdev->sb_start;
1446 	/* Limit to 4TB as metadata cannot record more than that.
1447 	 * (not needed for Linear and RAID0 as metadata doesn't
1448 	 * record this size)
1449 	 */
1450 	if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
1451 		rdev->sectors = (sector_t)(2ULL << 32) - 2;
1452 
1453 	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1454 		/* "this cannot possibly happen" ... */
1455 		ret = -EINVAL;
1456 
1457  abort:
1458 	return ret;
1459 }
1460 
1461 static u64 md_bitmap_events_cleared(struct mddev *mddev)
1462 {
1463 	struct md_bitmap_stats stats;
1464 	int err;
1465 
1466 	if (!md_bitmap_enabled(mddev, false))
1467 		return 0;
1468 
1469 	err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
1470 	if (err)
1471 		return 0;
1472 
1473 	return stats.events_cleared;
1474 }
1475 
1476 /*
1477  * validate_super for 0.90.0
1478  * note: we are not using "freshest" for 0.9 superblock
1479  */
1480 static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
1481 {
1482 	mdp_disk_t *desc;
1483 	mdp_super_t *sb = page_address(rdev->sb_page);
1484 	__u64 ev1 = md_event(sb);
1485 
1486 	rdev->raid_disk = -1;
1487 	clear_bit(Faulty, &rdev->flags);
1488 	clear_bit(In_sync, &rdev->flags);
1489 	clear_bit(Bitmap_sync, &rdev->flags);
1490 	clear_bit(WriteMostly, &rdev->flags);
1491 
1492 	if (mddev->raid_disks == 0) {
1493 		mddev->major_version = 0;
1494 		mddev->minor_version = sb->minor_version;
1495 		mddev->patch_version = sb->patch_version;
1496 		mddev->external = 0;
1497 		mddev->chunk_sectors = sb->chunk_size >> 9;
1498 		mddev->ctime = sb->ctime;
1499 		mddev->utime = sb->utime;
1500 		mddev->level = sb->level;
1501 		mddev->clevel[0] = 0;
1502 		mddev->layout = sb->layout;
1503 		mddev->raid_disks = sb->raid_disks;
1504 		mddev->dev_sectors = ((sector_t)sb->size) * 2;
1505 		mddev->events = ev1;
1506 		mddev->bitmap_info.offset = 0;
1507 		mddev->bitmap_info.space = 0;
1508 		/* bitmap can use 60 K after the 4K superblocks */
1509 		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1510 		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1511 		mddev->reshape_backwards = 0;
1512 
1513 		if (mddev->minor_version >= 91) {
1514 			mddev->reshape_position = sb->reshape_position;
1515 			mddev->delta_disks = sb->delta_disks;
1516 			mddev->new_level = sb->new_level;
1517 			mddev->new_layout = sb->new_layout;
1518 			mddev->new_chunk_sectors = sb->new_chunk >> 9;
1519 			if (mddev->delta_disks < 0)
1520 				mddev->reshape_backwards = 1;
1521 		} else {
1522 			mddev->reshape_position = MaxSector;
1523 			mddev->delta_disks = 0;
1524 			mddev->new_level = mddev->level;
1525 			mddev->new_layout = mddev->layout;
1526 			mddev->new_chunk_sectors = mddev->chunk_sectors;
1527 		}
1528 		if (mddev->level == 0)
1529 			mddev->layout = -1;
1530 
1531 		if (sb->state & (1<<MD_SB_CLEAN))
1532 			mddev->resync_offset = MaxSector;
1533 		else {
1534 			if (sb->events_hi == sb->cp_events_hi &&
1535 				sb->events_lo == sb->cp_events_lo) {
1536 				mddev->resync_offset = sb->recovery_cp;
1537 			} else
1538 				mddev->resync_offset = 0;
1539 		}
1540 
1541 		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1542 		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1543 		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1544 		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1545 
1546 		mddev->max_disks = MD_SB_DISKS;
1547 
1548 		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1549 		    mddev->bitmap_info.file == NULL) {
1550 			mddev->bitmap_info.offset =
1551 				mddev->bitmap_info.default_offset;
1552 			mddev->bitmap_info.space =
1553 				mddev->bitmap_info.default_space;
1554 		}
1555 
1556 	} else if (mddev->pers == NULL) {
1557 		/* Insist on good event counter while assembling, except
1558 		 * for spares (which don't need an event count) */
1559 		++ev1;
1560 		if (sb->disks[rdev->desc_nr].state & (
1561 			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1562 			if (ev1 < mddev->events)
1563 				return -EINVAL;
1564 	} else if (mddev->bitmap) {
1565 		/* if adding to array with a bitmap, then we can accept an
1566 		 * older device ... but not too old.
1567 		 */
1568 		if (ev1 < md_bitmap_events_cleared(mddev))
1569 			return 0;
1570 		if (ev1 < mddev->events)
1571 			set_bit(Bitmap_sync, &rdev->flags);
1572 	} else {
1573 		if (ev1 < mddev->events)
1574 			/* just a hot-add of a new device, leave raid_disk at -1 */
1575 			return 0;
1576 	}
1577 
1578 	desc = sb->disks + rdev->desc_nr;
1579 
1580 	if (desc->state & (1<<MD_DISK_FAULTY))
1581 		set_bit(Faulty, &rdev->flags);
1582 	else if (desc->state & (1<<MD_DISK_SYNC)) {
1583 		set_bit(In_sync, &rdev->flags);
1584 		rdev->raid_disk = desc->raid_disk;
1585 		rdev->saved_raid_disk = desc->raid_disk;
1586 	} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1587 		/* active but not in sync implies recovery up to
1588 		 * reshape position.  We don't know exactly where
1589 		 * that is, so set to zero for now
1590 		 */
1591 		if (mddev->minor_version >= 91) {
1592 			rdev->recovery_offset = 0;
1593 			rdev->raid_disk = desc->raid_disk;
1594 		}
1595 	}
1596 	if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1597 		set_bit(WriteMostly, &rdev->flags);
1598 	if (desc->state & (1<<MD_DISK_FAILFAST))
1599 		set_bit(FailFast, &rdev->flags);
1600 	return 0;
1601 }
1602 
1603 /*
1604  * sync_super for 0.90.0
1605  */
1606 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1607 {
1608 	mdp_super_t *sb;
1609 	struct md_rdev *rdev2;
1610 	int next_spare = mddev->raid_disks;
1611 
1612 	/* make rdev->sb match mddev data..
1613 	 *
1614 	 * 1/ zero out disks
1615 	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1616 	 * 3/ any empty disks < next_spare become removed
1617 	 *
1618 	 * disks[0] gets initialised to REMOVED because
1619 	 * we cannot be sure from other fields if it has
1620 	 * been initialised or not.
1621 	 */
1622 	int i;
1623 	int active = 0, working = 0, failed = 0, spare = 0, nr_disks = 0;
1624 
1625 	rdev->sb_size = MD_SB_BYTES;
1626 
1627 	sb = page_address(rdev->sb_page);
1628 
1629 	memset(sb, 0, sizeof(*sb));
1630 
1631 	sb->md_magic = MD_SB_MAGIC;
1632 	sb->major_version = mddev->major_version;
1633 	sb->patch_version = mddev->patch_version;
1634 	sb->gvalid_words  = 0; /* ignored */
1635 	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1636 	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1637 	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1638 	memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1639 
1640 	sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1641 	sb->level = mddev->level;
1642 	sb->size = mddev->dev_sectors / 2;
1643 	sb->raid_disks = mddev->raid_disks;
1644 	sb->md_minor = mddev->md_minor;
1645 	sb->not_persistent = 0;
1646 	sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1647 	sb->state = 0;
1648 	sb->events_hi = (mddev->events>>32);
1649 	sb->events_lo = (u32)mddev->events;
1650 
1651 	if (mddev->reshape_position == MaxSector)
1652 		sb->minor_version = 90;
1653 	else {
1654 		sb->minor_version = 91;
1655 		sb->reshape_position = mddev->reshape_position;
1656 		sb->new_level = mddev->new_level;
1657 		sb->delta_disks = mddev->delta_disks;
1658 		sb->new_layout = mddev->new_layout;
1659 		sb->new_chunk = mddev->new_chunk_sectors << 9;
1660 	}
1661 	mddev->minor_version = sb->minor_version;
1662 	if (mddev->in_sync)
1663 	{
1664 		sb->recovery_cp = mddev->resync_offset;
1665 		sb->cp_events_hi = (mddev->events>>32);
1666 		sb->cp_events_lo = (u32)mddev->events;
1667 		if (mddev->resync_offset == MaxSector)
1668 			sb->state = (1<< MD_SB_CLEAN);
1669 	} else
1670 		sb->recovery_cp = 0;
1671 
1672 	sb->layout = mddev->layout;
1673 	sb->chunk_size = mddev->chunk_sectors << 9;
1674 
1675 	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1676 		sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1677 
1678 	sb->disks[0].state = (1<<MD_DISK_REMOVED);
1679 	rdev_for_each(rdev2, mddev) {
1680 		mdp_disk_t *d;
1681 		int desc_nr;
1682 		int is_active = test_bit(In_sync, &rdev2->flags);
1683 
1684 		if (rdev2->raid_disk >= 0 &&
1685 		    sb->minor_version >= 91)
1686 			/* we have nowhere to store the recovery_offset,
1687 			 * but if it is not below the reshape_position,
1688 			 * we can piggy-back on that.
1689 			 */
1690 			is_active = 1;
1691 		if (rdev2->raid_disk < 0 ||
1692 		    test_bit(Faulty, &rdev2->flags))
1693 			is_active = 0;
1694 		if (is_active)
1695 			desc_nr = rdev2->raid_disk;
1696 		else
1697 			desc_nr = next_spare++;
1698 		rdev2->desc_nr = desc_nr;
1699 		d = &sb->disks[rdev2->desc_nr];
1700 		nr_disks++;
1701 		d->number = rdev2->desc_nr;
1702 		d->major = MAJOR(rdev2->bdev->bd_dev);
1703 		d->minor = MINOR(rdev2->bdev->bd_dev);
1704 		if (is_active)
1705 			d->raid_disk = rdev2->raid_disk;
1706 		else
1707 			d->raid_disk = rdev2->desc_nr; /* compatibility */
1708 		if (test_bit(Faulty, &rdev2->flags))
1709 			d->state = (1<<MD_DISK_FAULTY);
1710 		else if (is_active) {
1711 			d->state = (1<<MD_DISK_ACTIVE);
1712 			if (test_bit(In_sync, &rdev2->flags))
1713 				d->state |= (1<<MD_DISK_SYNC);
1714 			active++;
1715 			working++;
1716 		} else {
1717 			d->state = 0;
1718 			spare++;
1719 			working++;
1720 		}
1721 		if (test_bit(WriteMostly, &rdev2->flags))
1722 			d->state |= (1<<MD_DISK_WRITEMOSTLY);
1723 		if (test_bit(FailFast, &rdev2->flags))
1724 			d->state |= (1<<MD_DISK_FAILFAST);
1725 	}
1726 	/* now set the "removed" and "faulty" bits on any missing devices */
1727 	for (i = 0; i < mddev->raid_disks; i++) {
1728 		mdp_disk_t *d = &sb->disks[i];
1729 		if (d->state == 0 && d->number == 0) {
1730 			d->number = i;
1731 			d->raid_disk = i;
1732 			d->state = (1<<MD_DISK_REMOVED);
1733 			d->state |= (1<<MD_DISK_FAULTY);
1734 			failed++;
1735 		}
1736 	}
1737 	sb->nr_disks = nr_disks;
1738 	sb->active_disks = active;
1739 	sb->working_disks = working;
1740 	sb->failed_disks = failed;
1741 	sb->spare_disks = spare;
1742 
1743 	sb->this_disk = sb->disks[rdev->desc_nr];
1744 	sb->sb_csum = calc_sb_csum(sb);
1745 }
1746 
1747 /*
1748  * rdev_size_change for 0.90.0
1749  */
1750 static unsigned long long
1751 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1752 {
1753 	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1754 		return 0; /* component must fit device */
1755 	if (rdev->mddev->bitmap_info.offset)
1756 		return 0; /* can't move bitmap */
1757 	rdev->sb_start = calc_dev_sboffset(rdev);
1758 	if (!num_sectors || num_sectors > rdev->sb_start)
1759 		num_sectors = rdev->sb_start;
1760 	/* Limit to 4TB as metadata cannot record more than that.
1761 	 * 4TB == 2^32 KB, or 2*2^32 sectors.
1762 	 */
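	/* (2^33 - 2) sectors is (2^32 - 1) KB, the largest value the
	 * 0.90 superblock's 32-bit KB-granular size field can record,
	 * hence the "- 2" in the clamp below.
	 */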
1763 	if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1764 		num_sectors = (sector_t)(2ULL << 32) - 2;
1765 	do {
1766 		md_write_metadata(rdev->mddev, rdev, rdev->sb_start,
1767 				  rdev->sb_size, rdev->sb_page, 0);
1768 	} while (md_super_wait(rdev->mddev) < 0);
1769 	return num_sectors;
1770 }
1771 
1772 static int
1773 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1774 {
1775 	/* non-zero offset changes not possible with v0.90 */
1776 	return new_offset == 0;
1777 }
1778 
1779 /*
1780  * version 1 superblock
1781  */
1782 
1783 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1784 {
1785 	__le32 disk_csum;
1786 	u32 csum;
1787 	unsigned long long newcsum;
1788 	int size = 256 + le32_to_cpu(sb->max_dev)*2;
1789 	__le32 *isuper = (__le32*)sb;
1790 
1791 	disk_csum = sb->sb_csum;
1792 	sb->sb_csum = 0;
1793 	newcsum = 0;
1794 	for (; size >= 4; size -= 4)
1795 		newcsum += le32_to_cpu(*isuper++);
1796 
1797 	if (size == 2)
1798 		newcsum += le16_to_cpu(*(__le16*) isuper);
1799 
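	/* Fold the 64-bit sum into 32 bits, adding carries back in;
	 * e.g. a running sum of 0x0000000223456789 folds to 0x2345678b.
	 */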
1800 	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1801 	sb->sb_csum = disk_csum;
1802 	return cpu_to_le32(csum);
1803 }
1804 
1805 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1806 {
1807 	struct mdp_superblock_1 *sb;
1808 	int ret;
1809 	sector_t sb_start;
1810 	sector_t sectors;
1811 	int bmask;
1812 	bool spare_disk = true;
1813 
1814 	/*
1815 	 * Calculate the position of the superblock in 512byte sectors.
1816 	 * It is always aligned to a 4K boundary and
1817 	 * depending on minor_version, it can be:
1818 	 * 0: At least 8K, but less than 12K, from end of device.
1819 	 * 1: At start of device.
1820 	 * 2: 4K from start of device.
1821 	 */
1822 	switch(minor_version) {
1823 	case 0:
1824 		sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
1825 		sb_start &= ~(sector_t)(4*2-1);
1826 		break;
1827 	case 1:
1828 		sb_start = 0;
1829 		break;
1830 	case 2:
1831 		sb_start = 8;
1832 		break;
1833 	default:
1834 		return -EINVAL;
1835 	}
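	/* e.g. for minor_version 0 on a 1000005-sector device:
	 * 1000005 - 16 = 999989, masked down to a 4K boundary, gives
	 * sb_start = 999984, i.e. between 8K and 12K from the end.
	 */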
1836 	rdev->sb_start = sb_start;
1837 
1838 	/* superblock is rarely larger than 1K, but it can be larger,
1839 	 * and it is safe to read 4k, so we do that
1840 	 */
1841 	ret = read_disk_sb(rdev, 4096);
1842 	if (ret) return ret;
1843 
1844 	sb = page_address(rdev->sb_page);
1845 
1846 	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1847 	    sb->major_version != cpu_to_le32(1) ||
1848 	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1849 	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1850 	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1851 		return -EINVAL;
1852 
1853 	if (calc_sb_1_csum(sb) != sb->sb_csum) {
1854 		pr_warn("md: invalid superblock checksum on %pg\n",
1855 			rdev->bdev);
1856 		return -EINVAL;
1857 	}
1858 	if (le64_to_cpu(sb->data_size) < 10) {
1859 		pr_warn("md: data_size too small on %pg\n",
1860 			rdev->bdev);
1861 		return -EINVAL;
1862 	}
1863 	if (sb->pad0 ||
1864 	    sb->pad3[0] ||
1865 	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1866 		/* Some padding is non-zero, might be a new feature */
1867 		return -EINVAL;
1872 
1873 	rdev->preferred_minor = 0xffff;
1874 	rdev->data_offset = le64_to_cpu(sb->data_offset);
1875 	rdev->new_data_offset = rdev->data_offset;
1876 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1877 	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1878 		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1879 	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1880 
1881 	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
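	/* Round sb_size up to the logical block size below: with 4K
	 * blocks, e.g. max_dev == 292 gives 256 + 292*2 == 840 bytes,
	 * which rounds up to 4096.
	 */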
1882 	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1883 	if (rdev->sb_size & bmask)
1884 		rdev->sb_size = (rdev->sb_size | bmask) + 1;
1885 
1886 	if (minor_version
1887 	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
1888 		return -EINVAL;
1889 	if (minor_version
1890 	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1891 		return -EINVAL;
1892 
1893 	rdev->desc_nr = le32_to_cpu(sb->dev_number);
1894 
1895 	if (!rdev->bb_page) {
1896 		rdev->bb_page = alloc_page(GFP_KERNEL);
1897 		if (!rdev->bb_page)
1898 			return -ENOMEM;
1899 	}
1900 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1901 	    rdev->badblocks.count == 0) {
1902 		/* need to load the bad block list.
1903 		 * Currently we limit it to one page.
1904 		 */
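		/* On-disk entry format (see the decode below): bits 63:10
		 * hold the start sector and bits 9:0 the length in sectors,
		 * both scaled by bblog_shift; an all-ones entry ends the log.
		 */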
1905 		s32 offset;
1906 		sector_t bb_sector;
1907 		__le64 *bbp;
1908 		int i;
1909 		int sectors = le16_to_cpu(sb->bblog_size);
1910 		if (sectors > (PAGE_SIZE / 512))
1911 			return -EINVAL;
1912 		offset = le32_to_cpu(sb->bblog_offset);
1913 		if (offset == 0)
1914 			return -EINVAL;
1915 		bb_sector = (long long)offset;
1916 		if (!sync_page_io(rdev, bb_sector, sectors << 9,
1917 				  rdev->bb_page, REQ_OP_READ, true))
1918 			return -EIO;
1919 		bbp = (__le64 *)page_address(rdev->bb_page);
1920 		rdev->badblocks.shift = sb->bblog_shift;
1921 		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1922 			u64 bb = le64_to_cpu(*bbp);
1923 			int count = bb & (0x3ff);
1924 			u64 sector = bb >> 10;
1925 			sector <<= sb->bblog_shift;
1926 			count <<= sb->bblog_shift;
1927 			if (bb + 1 == 0)
1928 				break;
1929 			if (!badblocks_set(&rdev->badblocks, sector, count, 1))
1930 				return -EINVAL;
1931 		}
1932 	} else if (sb->bblog_offset != 0)
1933 		rdev->badblocks.shift = 0;
1934 
1935 	if ((le32_to_cpu(sb->feature_map) &
1936 	    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
1937 		rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1938 		rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1939 		rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1940 	}
1941 
1942 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1943 	    sb->level != 0)
1944 		return -EINVAL;
1945 
1946 	/* not spare disk */
1947 	if (rdev->desc_nr >= 0 && rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1948 	    (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1949 	     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1950 		spare_disk = false;
1951 
1952 	if (!refdev) {
1953 		if (!spare_disk)
1954 			ret = 1;
1955 		else
1956 			ret = 0;
1957 	} else {
1958 		__u64 ev1, ev2;
1959 		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1960 
1961 		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1962 		    sb->level != refsb->level ||
1963 		    sb->layout != refsb->layout ||
1964 		    sb->chunksize != refsb->chunksize) {
1965 			pr_warn("md: %pg has strangely different superblock to %pg\n",
1966 				rdev->bdev,
1967 				refdev->bdev);
1968 			return -EINVAL;
1969 		}
1970 		ev1 = le64_to_cpu(sb->events);
1971 		ev2 = le64_to_cpu(refsb->events);
1972 
1973 		if (!spare_disk && ev1 > ev2)
1974 			ret = 1;
1975 		else
1976 			ret = 0;
1977 	}
1978 	if (minor_version)
1979 		sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
1980 	else
1981 		sectors = rdev->sb_start;
1982 	if (sectors < le64_to_cpu(sb->data_size))
1983 		return -EINVAL;
1984 	rdev->sectors = le64_to_cpu(sb->data_size);
1985 	return ret;
1986 }
1987 
1988 static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
1989 {
1990 	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1991 	__u64 ev1 = le64_to_cpu(sb->events);
1992 	int role;
1993 
1994 	rdev->raid_disk = -1;
1995 	clear_bit(Faulty, &rdev->flags);
1996 	clear_bit(In_sync, &rdev->flags);
1997 	clear_bit(Bitmap_sync, &rdev->flags);
1998 	clear_bit(WriteMostly, &rdev->flags);
1999 
2000 	if (mddev->raid_disks == 0) {
2001 		mddev->major_version = 1;
2002 		mddev->patch_version = 0;
2003 		mddev->external = 0;
2004 		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
2005 		mddev->ctime = le64_to_cpu(sb->ctime);
2006 		mddev->utime = le64_to_cpu(sb->utime);
2007 		mddev->level = le32_to_cpu(sb->level);
2008 		mddev->clevel[0] = 0;
2009 		mddev->layout = le32_to_cpu(sb->layout);
2010 		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
2011 		mddev->dev_sectors = le64_to_cpu(sb->size);
2012 		mddev->events = ev1;
2013 		mddev->bitmap_info.offset = 0;
2014 		mddev->bitmap_info.space = 0;
2015 		/* Default location for bitmap is 1K after the superblock,
2016 		 * using up to 3K of the 4K total.
2017 		 */
2018 		mddev->bitmap_info.default_offset = 1024 >> 9;
2019 		mddev->bitmap_info.default_space = (4096-1024) >> 9;
2020 		mddev->reshape_backwards = 0;
2021 
2022 		mddev->resync_offset = le64_to_cpu(sb->resync_offset);
2023 		memcpy(mddev->uuid, sb->set_uuid, 16);
2024 
2025 		mddev->max_disks =  (4096-256)/2;
2026 
2027 		if (!mddev->logical_block_size)
2028 			mddev->logical_block_size = le32_to_cpu(sb->logical_block_size);
2029 
2030 		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
2031 		    mddev->bitmap_info.file == NULL) {
2032 			mddev->bitmap_info.offset =
2033 				(__s32)le32_to_cpu(sb->bitmap_offset);
2034 			/* Metadata doesn't record how much space is available.
2035 			 * For 1.0, assume the bitmap can use up to the
2036 			 * superblock if it sits before it, else 4K beyond it.
2037 			 * For others, assume no change is possible.
2038 			 */
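			/* e.g. with 1.0 metadata, bitmap_offset == 2 allows
			 * 8 - 2 == 6 sectors (3K) beyond the superblock,
			 * while bitmap_offset == -16 allows the 16 sectors
			 * below it.
			 */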
2039 			if (mddev->minor_version > 0)
2040 				mddev->bitmap_info.space = 0;
2041 			else if (mddev->bitmap_info.offset > 0)
2042 				mddev->bitmap_info.space =
2043 					8 - mddev->bitmap_info.offset;
2044 			else
2045 				mddev->bitmap_info.space =
2046 					-mddev->bitmap_info.offset;
2047 		}
2048 
2049 		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
2050 			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
2051 			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
2052 			mddev->new_level = le32_to_cpu(sb->new_level);
2053 			mddev->new_layout = le32_to_cpu(sb->new_layout);
2054 			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
2055 			if (mddev->delta_disks < 0 ||
2056 			    (mddev->delta_disks == 0 &&
2057 			     (le32_to_cpu(sb->feature_map)
2058 			      & MD_FEATURE_RESHAPE_BACKWARDS)))
2059 				mddev->reshape_backwards = 1;
2060 		} else {
2061 			mddev->reshape_position = MaxSector;
2062 			mddev->delta_disks = 0;
2063 			mddev->new_level = mddev->level;
2064 			mddev->new_layout = mddev->layout;
2065 			mddev->new_chunk_sectors = mddev->chunk_sectors;
2066 		}
2067 
2068 		if (mddev->level == 0 &&
2069 		    !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
2070 			mddev->layout = -1;
2071 
2072 		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
2073 			set_bit(MD_HAS_JOURNAL, &mddev->flags);
2074 
2075 		if (le32_to_cpu(sb->feature_map) &
2076 		    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
2077 			if (le32_to_cpu(sb->feature_map) &
2078 			    (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
2079 				return -EINVAL;
2080 			if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
2081 			    (le32_to_cpu(sb->feature_map) &
2082 					    MD_FEATURE_MULTIPLE_PPLS))
2083 				return -EINVAL;
2084 			set_bit(MD_HAS_PPL, &mddev->flags);
2085 		}
2086 	} else if (mddev->pers == NULL) {
2087 		/* Insist on a good event counter while assembling, except for
2088 		 * spares (which don't need an event count).
2089 		 * Similar to mdadm, we allow event counter difference of 1
2090 		 * from the freshest device.
2091 		 */
2092 		if (rdev->desc_nr >= 0 &&
2093 		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
2094 		    (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
2095 		     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
2096 			if (ev1 + 1 < mddev->events)
2097 				return -EINVAL;
2098 	} else if (mddev->bitmap) {
2099 		/* If adding to array with a bitmap, then we can accept an
2100 		 * older device, but not too old.
2101 		 */
2102 		if (ev1 < md_bitmap_events_cleared(mddev))
2103 			return 0;
2104 		if (ev1 < mddev->events)
2105 			set_bit(Bitmap_sync, &rdev->flags);
2106 	} else {
2107 		if (ev1 < mddev->events)
2108 			/* just a hot-add of a new device, leave raid_disk at -1 */
2109 			return 0;
2110 	}
2111 
2112 	if (rdev->desc_nr < 0 ||
2113 	    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
2114 		role = MD_DISK_ROLE_SPARE;
2115 		rdev->desc_nr = -1;
2116 	} else if (mddev->pers == NULL && freshest && ev1 < mddev->events) {
2117 		/*
2118 		 * If we are assembling, and our event counter is smaller than the
2119 		 * highest event counter, we cannot trust our superblock about the role.
2120 		 * It could happen that our rdev was marked as Faulty, and all other
2121 		 * superblocks were updated with +1 event counter.
2122 		 * Then, before the next superblock update, which typically happens when
2123 		 * remove_and_add_spares() removes the device from the array, there was
2124 		 * a crash or reboot.
2125 		 * If we allow current rdev without consulting the freshest superblock,
2126 		 * we could cause data corruption.
2127 		 * Note that in this case our event counter is smaller by 1 than the
2128 		 * highest, otherwise this rdev would not be allowed into the array;
2129 		 * both kernel and mdadm allow event counter difference of 1.
2130 		 */
2131 		struct mdp_superblock_1 *freshest_sb = page_address(freshest->sb_page);
2132 		u32 freshest_max_dev = le32_to_cpu(freshest_sb->max_dev);
2133 
2134 		if (rdev->desc_nr >= freshest_max_dev) {
2135 			/* this is unexpected, better not proceed */
2136 			pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n",
2137 				mdname(mddev), rdev->bdev, rdev->desc_nr,
2138 				freshest->bdev, freshest_max_dev);
2139 			return -EUCLEAN;
2140 		}
2141 
2142 		role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]);
2143 		pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n",
2144 			 mdname(mddev), rdev->bdev, role, role, freshest->bdev);
2145 	} else {
2146 		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2147 	}
2148 	switch (role) {
2149 	case MD_DISK_ROLE_SPARE: /* spare */
2150 		break;
2151 	case MD_DISK_ROLE_FAULTY: /* faulty */
2152 		set_bit(Faulty, &rdev->flags);
2153 		break;
2154 	case MD_DISK_ROLE_JOURNAL: /* journal device */
2155 		if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
2156 			/* journal device without journal feature */
2157 			pr_warn("md: journal device provided without journal feature, ignoring the device\n");
2158 			return -EINVAL;
2159 		}
2160 		set_bit(Journal, &rdev->flags);
2161 		rdev->journal_tail = le64_to_cpu(sb->journal_tail);
2162 		rdev->raid_disk = 0;
2163 		break;
2164 	default:
2165 		rdev->saved_raid_disk = role;
2166 		if ((le32_to_cpu(sb->feature_map) &
2167 		     MD_FEATURE_RECOVERY_OFFSET)) {
2168 			rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
2169 			if (!(le32_to_cpu(sb->feature_map) &
2170 			      MD_FEATURE_RECOVERY_BITMAP))
2171 				rdev->saved_raid_disk = -1;
2172 		} else {
2173 			/*
2174 			 * If the array is FROZEN, then the device can't
2175 			 * be in_sync with rest of array.
2176 			 */
2177 			if (!test_bit(MD_RECOVERY_FROZEN,
2178 				      &mddev->recovery))
2179 				set_bit(In_sync, &rdev->flags);
2180 		}
2181 		rdev->raid_disk = role;
2182 		break;
2183 	}
2184 	if (sb->devflags & WriteMostly1)
2185 		set_bit(WriteMostly, &rdev->flags);
2186 	if (sb->devflags & FailFast1)
2187 		set_bit(FailFast, &rdev->flags);
2188 	if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
2189 		set_bit(Replacement, &rdev->flags);
2190 
2191 	return 0;
2192 }
2193 
2194 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
2195 {
2196 	struct mdp_superblock_1 *sb;
2197 	struct md_rdev *rdev2;
2198 	int max_dev, i;
2199 	/* make rdev->sb match mddev and rdev data. */
2200 
2201 	sb = page_address(rdev->sb_page);
2202 
2203 	sb->feature_map = 0;
2204 	sb->pad0 = 0;
2205 	sb->recovery_offset = cpu_to_le64(0);
2206 	memset(sb->pad3, 0, sizeof(sb->pad3));
2207 
2208 	sb->utime = cpu_to_le64((__u64)mddev->utime);
2209 	sb->events = cpu_to_le64(mddev->events);
2210 	if (mddev->in_sync)
2211 		sb->resync_offset = cpu_to_le64(mddev->resync_offset);
2212 	else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
2213 		sb->resync_offset = cpu_to_le64(MaxSector);
2214 	else
2215 		sb->resync_offset = cpu_to_le64(0);
2216 
2217 	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
2218 
2219 	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
2220 	sb->size = cpu_to_le64(mddev->dev_sectors);
2221 	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
2222 	sb->level = cpu_to_le32(mddev->level);
2223 	sb->layout = cpu_to_le32(mddev->layout);
2224 	sb->logical_block_size = cpu_to_le32(mddev->logical_block_size);
2225 	if (test_bit(FailFast, &rdev->flags))
2226 		sb->devflags |= FailFast1;
2227 	else
2228 		sb->devflags &= ~FailFast1;
2229 
2230 	if (test_bit(WriteMostly, &rdev->flags))
2231 		sb->devflags |= WriteMostly1;
2232 	else
2233 		sb->devflags &= ~WriteMostly1;
2234 	sb->data_offset = cpu_to_le64(rdev->data_offset);
2235 	sb->data_size = cpu_to_le64(rdev->sectors);
2236 
2237 	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
2238 		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
2239 		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
2240 	}
2241 
2242 	if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
2243 	    !test_bit(In_sync, &rdev->flags)) {
2244 		sb->feature_map |=
2245 			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
2246 		sb->recovery_offset =
2247 			cpu_to_le64(rdev->recovery_offset);
2248 		if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2249 			sb->feature_map |=
2250 				cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
2251 	}
2252 	/* Note: recovery_offset and journal_tail share space  */
2253 	if (test_bit(Journal, &rdev->flags))
2254 		sb->journal_tail = cpu_to_le64(rdev->journal_tail);
2255 	if (test_bit(Replacement, &rdev->flags))
2256 		sb->feature_map |=
2257 			cpu_to_le32(MD_FEATURE_REPLACEMENT);
2258 
2259 	if (mddev->reshape_position != MaxSector) {
2260 		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2261 		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2262 		sb->new_layout = cpu_to_le32(mddev->new_layout);
2263 		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2264 		sb->new_level = cpu_to_le32(mddev->new_level);
2265 		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
2266 		if (mddev->delta_disks == 0 &&
2267 		    mddev->reshape_backwards)
2268 			sb->feature_map
2269 				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
2270 		if (rdev->new_data_offset != rdev->data_offset) {
2271 			sb->feature_map
2272 				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2273 			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2274 							     - rdev->data_offset));
2275 		}
2276 	}
2277 
2278 	if (mddev_is_clustered(mddev))
2279 		sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2280 
2281 	if (rdev->badblocks.count == 0)
2282 		/* Nothing to do for bad blocks*/ ;
2283 	else if (sb->bblog_offset == 0)
2284 		/* Cannot record bad blocks on this device */
2285 		md_error(mddev, rdev);
2286 	else {
2287 		struct badblocks *bb = &rdev->badblocks;
2288 		__le64 *bbp = (__le64 *)page_address(rdev->bb_page);
2289 		u64 *p = bb->page;
2290 		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
2291 		if (bb->changed) {
2292 			unsigned seq;
2293 
2294 retry:
2295 			seq = read_seqbegin(&bb->lock);
2296 
2297 			memset(bbp, 0xff, PAGE_SIZE);
2298 
2299 			for (i = 0 ; i < bb->count ; i++) {
2300 				u64 internal_bb = p[i];
2301 				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2302 						| BB_LEN(internal_bb));
2303 				bbp[i] = cpu_to_le64(store_bb);
2304 			}
2305 			bb->changed = 0;
2306 			if (read_seqretry(&bb->lock, seq))
2307 				goto retry;
2308 
2309 			bb->sector = (rdev->sb_start +
2310 				      (int)le32_to_cpu(sb->bblog_offset));
2311 			bb->size = le16_to_cpu(sb->bblog_size);
2312 		}
2313 	}
2314 
2315 	max_dev = 0;
2316 	rdev_for_each(rdev2, mddev)
2317 		if (rdev2->desc_nr+1 > max_dev)
2318 			max_dev = rdev2->desc_nr+1;
2319 
2320 	if (max_dev > le32_to_cpu(sb->max_dev)) {
2321 		int bmask;
2322 		sb->max_dev = cpu_to_le32(max_dev);
2323 		rdev->sb_size = max_dev * 2 + 256;
2324 		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2325 		if (rdev->sb_size & bmask)
2326 			rdev->sb_size = (rdev->sb_size | bmask) + 1;
2327 	} else
2328 		max_dev = le32_to_cpu(sb->max_dev);
2329 
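	/* All slots start out as 'spare'; the rdev loop below overwrites
	 * the slots that hold active, faulty or journal devices.
	 */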
2330 	for (i=0; i<max_dev;i++)
2331 		sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2332 
2333 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2334 		sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
2335 
2336 	if (test_bit(MD_HAS_PPL, &mddev->flags)) {
2337 		if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2338 			sb->feature_map |=
2339 			    cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2340 		else
2341 			sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
2342 		sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2343 		sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2344 	}
2345 
2346 	rdev_for_each(rdev2, mddev) {
2347 		i = rdev2->desc_nr;
2348 		if (test_bit(Faulty, &rdev2->flags))
2349 			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
2350 		else if (test_bit(In_sync, &rdev2->flags))
2351 			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2352 		else if (test_bit(Journal, &rdev2->flags))
2353 			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
2354 		else if (rdev2->raid_disk >= 0)
2355 			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2356 		else
2357 			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2358 	}
2359 
2360 	sb->sb_csum = calc_sb_1_csum(sb);
2361 }
2362 
2363 static sector_t super_1_choose_bm_space(sector_t dev_size)
2364 {
2365 	sector_t bm_space;
2366 
2367 	/* if the device is bigger than 8Gig, save 64k for bitmap
2368 	 * usage, if bigger than 200Gig, save 128k
2369 	 */
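	/* All quantities are in 512-byte sectors, so 128*2 is 128K and
	 * 8*1024*1024*2 is 8G; e.g. a 16G device reserves 64K and a
	 * 300G device reserves 128K.
	 */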
2370 	if (dev_size < 64*2)
2371 		bm_space = 0;
2372 	else if (dev_size - 64*2 >= 200*1024*1024*2)
2373 		bm_space = 128*2;
2374 	else if (dev_size - 4*2 > 8*1024*1024*2)
2375 		bm_space = 64*2;
2376 	else
2377 		bm_space = 4*2;
2378 	return bm_space;
2379 }
2380 
2381 static unsigned long long
2382 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
2383 {
2384 	struct mdp_superblock_1 *sb;
2385 	sector_t max_sectors;
2386 	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
2387 		return 0; /* component must fit device */
2388 	if (rdev->data_offset != rdev->new_data_offset)
2389 		return 0; /* too confusing */
2390 	if (rdev->sb_start < rdev->data_offset) {
2391 		/* minor versions 1 and 2; superblock before data */
2392 		max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
2393 		if (!num_sectors || num_sectors > max_sectors)
2394 			num_sectors = max_sectors;
2395 	} else if (rdev->mddev->bitmap_info.offset) {
2396 		/* minor version 0 with bitmap we can't move */
2397 		return 0;
2398 	} else {
2399 		/* minor version 0; superblock after data */
2400 		sector_t sb_start, bm_space;
2401 		sector_t dev_size = bdev_nr_sectors(rdev->bdev);
2402 
2403 		/* 8K is for superblock */
2404 		sb_start = dev_size - 8*2;
2405 		sb_start &= ~(sector_t)(4*2 - 1);
2406 
2407 		bm_space = super_1_choose_bm_space(dev_size);
2408 
2409 		/* Space that can be used to store data must exclude the
2410 		 * superblock, bitmap space and bad block space (4K)
2411 		 */
2412 		max_sectors = sb_start - bm_space - 4*2;
2413 
2414 		if (!num_sectors || num_sectors > max_sectors)
2415 			num_sectors = max_sectors;
2416 		rdev->sb_start = sb_start;
2417 	}
2418 	sb = page_address(rdev->sb_page);
2419 	sb->data_size = cpu_to_le64(num_sectors);
2420 	sb->super_offset = cpu_to_le64(rdev->sb_start);
2421 	sb->sb_csum = calc_sb_1_csum(sb);
2422 	do {
2423 		md_write_metadata(rdev->mddev, rdev, rdev->sb_start,
2424 				  rdev->sb_size, rdev->sb_page, 0);
2425 	} while (md_super_wait(rdev->mddev) < 0);
2426 	return num_sectors;
2427 
2428 }
2429 
2430 static int
2431 super_1_allow_new_offset(struct md_rdev *rdev,
2432 			 unsigned long long new_offset)
2433 {
2434 	struct mddev *mddev = rdev->mddev;
2435 
2436 	/* All necessary checks on new >= old have been done */
2437 	if (new_offset >= rdev->data_offset)
2438 		return 1;
2439 
2440 	/* with 1.0 metadata, there is no metadata to tread on
2441 	 * so we can always move back */
2442 	if (mddev->minor_version == 0)
2443 		return 1;
2444 
2445 	/* otherwise we must be sure not to step on
2446 	 * any metadata, so stay:
2447 	 * 36K beyond start of superblock
2448 	 * beyond end of badblocks
2449 	 * beyond write-intent bitmap
2450 	 */
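	/* (32+4)*2 converts 36K into 512-byte sectors (72). */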
2451 	if (rdev->sb_start + (32+4)*2 > new_offset)
2452 		return 0;
2453 
2454 	if (md_bitmap_registered(mddev) && !mddev->bitmap_info.file) {
2455 		struct md_bitmap_stats stats;
2456 		int err;
2457 
2458 		err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
2459 		if (!err && rdev->sb_start + mddev->bitmap_info.offset +
2460 		    stats.file_pages * (PAGE_SIZE >> 9) > new_offset)
2461 			return 0;
2462 	}
2463 
2464 	if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2465 		return 0;
2466 
2467 	return 1;
2468 }
2469 
2470 static struct super_type super_types[] = {
2471 	[0] = {
2472 		.name	= "0.90.0",
2473 		.owner	= THIS_MODULE,
2474 		.load_super	    = super_90_load,
2475 		.validate_super	    = super_90_validate,
2476 		.sync_super	    = super_90_sync,
2477 		.rdev_size_change   = super_90_rdev_size_change,
2478 		.allow_new_offset   = super_90_allow_new_offset,
2479 	},
2480 	[1] = {
2481 		.name	= "md-1",
2482 		.owner	= THIS_MODULE,
2483 		.load_super	    = super_1_load,
2484 		.validate_super	    = super_1_validate,
2485 		.sync_super	    = super_1_sync,
2486 		.rdev_size_change   = super_1_rdev_size_change,
2487 		.allow_new_offset   = super_1_allow_new_offset,
2488 	},
2489 };
2490 
2491 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
2492 {
2493 	if (mddev->sync_super) {
2494 		mddev->sync_super(mddev, rdev);
2495 		return;
2496 	}
2497 
2498 	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2499 
2500 	super_types[mddev->major_version].sync_super(mddev, rdev);
2501 }
2502 
2503 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
2504 {
2505 	struct md_rdev *rdev, *rdev2;
2506 
2507 	rcu_read_lock();
2508 	rdev_for_each_rcu(rdev, mddev1) {
2509 		if (test_bit(Faulty, &rdev->flags) ||
2510 		    test_bit(Journal, &rdev->flags) ||
2511 		    rdev->raid_disk == -1)
2512 			continue;
2513 		rdev_for_each_rcu(rdev2, mddev2) {
2514 			if (test_bit(Faulty, &rdev2->flags) ||
2515 			    test_bit(Journal, &rdev2->flags) ||
2516 			    rdev2->raid_disk == -1)
2517 				continue;
2518 			if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
2519 				rcu_read_unlock();
2520 				return 1;
2521 			}
2522 		}
2523 	}
2524 	rcu_read_unlock();
2525 	return 0;
2526 }
2527 
2528 static LIST_HEAD(pending_raid_disks);
2529 
2530 /*
2531  * Try to register data integrity profile for an mddev
2532  *
2533  * This is called when an array is started and after a disk has been kicked
2534  * from the array. It only succeeds if all working and active component devices
2535  * are integrity capable with matching profiles.
2536  */
2537 int md_integrity_register(struct mddev *mddev)
2538 {
2539 	if (list_empty(&mddev->disks))
2540 		return 0; /* nothing to do */
2541 	if (mddev_is_dm(mddev) || !blk_get_integrity(mddev->gendisk))
2542 		return 0; /* shouldn't register */
2543 
2544 	pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2545 	return 0;
2546 }
2547 EXPORT_SYMBOL(md_integrity_register);
2548 
2549 static bool rdev_read_only(struct md_rdev *rdev)
2550 {
2551 	return bdev_read_only(rdev->bdev) ||
2552 		(rdev->meta_bdev && bdev_read_only(rdev->meta_bdev));
2553 }
2554 
2555 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2556 {
2557 	char b[BDEVNAME_SIZE];
2558 	int err;
2559 
2560 	/* prevent duplicates */
2561 	if (find_rdev(mddev, rdev->bdev->bd_dev))
2562 		return -EEXIST;
2563 
2564 	if (rdev_read_only(rdev) && mddev->pers)
2565 		return -EROFS;
2566 
2567 	/* make sure rdev->sectors exceeds mddev->dev_sectors */
2568 	if (!test_bit(Journal, &rdev->flags) &&
2569 	    rdev->sectors &&
2570 	    (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2571 		if (mddev->pers) {
2572 			/* Cannot change size, so fail
2573 			 * If mddev->level <= 0, then we don't care
2574 			 * about aligning sizes (e.g. linear)
2575 			 */
2576 			if (mddev->level > 0)
2577 				return -ENOSPC;
2578 		} else
2579 			mddev->dev_sectors = rdev->sectors;
2580 	}
2581 
2582 	/* Verify rdev->desc_nr is unique.
2583 	 * If it is -1, assign a free number, else
2584 	 * check number is not in use
2585 	 */
2586 	rcu_read_lock();
2587 	if (rdev->desc_nr < 0) {
2588 		int choice = 0;
2589 		if (mddev->pers)
2590 			choice = mddev->raid_disks;
2591 		while (md_find_rdev_nr_rcu(mddev, choice))
2592 			choice++;
2593 		rdev->desc_nr = choice;
2594 	} else {
2595 		if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2596 			rcu_read_unlock();
2597 			return -EBUSY;
2598 		}
2599 	}
2600 	rcu_read_unlock();
2601 	if (!test_bit(Journal, &rdev->flags) &&
2602 	    mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2603 		pr_warn("md: %s: array is limited to %d devices\n",
2604 			mdname(mddev), mddev->max_disks);
2605 		return -EBUSY;
2606 	}
2607 	snprintf(b, sizeof(b), "%pg", rdev->bdev);
2608 	strreplace(b, '/', '!');
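	/* '/' is not valid in a kobject name, so e.g. "cciss/c0d0"
	 * becomes "cciss!c0d0", matching the usual sysfs convention.
	 */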
2609 
2610 	rdev->mddev = mddev;
2611 	pr_debug("md: bind<%s>\n", b);
2612 
2613 	if (mddev->raid_disks)
2614 		mddev_create_serial_pool(mddev, rdev);
2615 
2616 	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2617 		goto fail;
2618 
2619 	/* failure here is OK */
2620 	err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
2621 	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2622 	rdev->sysfs_unack_badblocks =
2623 		sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2624 	rdev->sysfs_badblocks =
2625 		sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
2626 
2627 	list_add_rcu(&rdev->same_set, &mddev->disks);
2628 	bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2629 
2630 	return 0;
2631 
2632  fail:
2633 	pr_warn("md: failed to register dev-%s for %s\n",
2634 		b, mdname(mddev));
2635 	mddev_destroy_serial_pool(mddev, rdev);
2636 	return err;
2637 }
2638 
2639 void md_autodetect_dev(dev_t dev);
2640 
2641 /* just for claiming the bdev */
2642 static struct md_rdev claim_rdev;
2643 
2644 static void export_rdev(struct md_rdev *rdev)
2645 {
2646 	pr_debug("md: export_rdev(%pg)\n", rdev->bdev);
2647 	md_rdev_clear(rdev);
2648 #ifndef MODULE
2649 	if (test_bit(AutoDetected, &rdev->flags))
2650 		md_autodetect_dev(rdev->bdev->bd_dev);
2651 #endif
2652 	fput(rdev->bdev_file);
2653 	rdev->bdev = NULL;
2654 	kobject_put(&rdev->kobj);
2655 }
2656 
2657 static void md_kick_rdev_from_array(struct md_rdev *rdev)
2658 {
2659 	struct mddev *mddev = rdev->mddev;
2660 
2661 	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2662 	list_del_rcu(&rdev->same_set);
2663 	pr_debug("md: unbind<%pg>\n", rdev->bdev);
2664 	mddev_destroy_serial_pool(rdev->mddev, rdev);
2665 	WRITE_ONCE(rdev->mddev, NULL);
2666 	sysfs_remove_link(&rdev->kobj, "block");
2667 	sysfs_put(rdev->sysfs_state);
2668 	sysfs_put(rdev->sysfs_unack_badblocks);
2669 	sysfs_put(rdev->sysfs_badblocks);
2670 	rdev->sysfs_state = NULL;
2671 	rdev->sysfs_unack_badblocks = NULL;
2672 	rdev->sysfs_badblocks = NULL;
2673 	rdev->badblocks.count = 0;
2674 
2675 	synchronize_rcu();
2676 
2677 	/*
2678 	 * kobject_del() will wait for all in-progress writers to be done,
2679 	 * and those may hold reconfig_mutex, hence it can't be called under
2680 	 * reconfig_mutex and is delayed until mddev_unlock().
2681 	 */
2682 	list_add(&rdev->same_set, &mddev->deleting);
2683 }
2684 
2685 static void export_array(struct mddev *mddev)
2686 {
2687 	struct md_rdev *rdev;
2688 
2689 	while (!list_empty(&mddev->disks)) {
2690 		rdev = list_first_entry(&mddev->disks, struct md_rdev,
2691 					same_set);
2692 		md_kick_rdev_from_array(rdev);
2693 	}
2694 	mddev->raid_disks = 0;
2695 	mddev->major_version = 0;
2696 }
2697 
2698 static bool set_in_sync(struct mddev *mddev)
2699 {
2700 	lockdep_assert_held(&mddev->lock);
2701 	if (!mddev->in_sync) {
2702 		mddev->sync_checkers++;
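		/*
		 * writes_pending cannot be sampled reliably in percpu mode;
		 * drop the lock (the mode switch may block) and force atomic
		 * mode so percpu_ref_is_zero() below gives a firm answer.
		 */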
2703 		spin_unlock(&mddev->lock);
2704 		percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2705 		spin_lock(&mddev->lock);
2706 		if (!mddev->in_sync &&
2707 		    percpu_ref_is_zero(&mddev->writes_pending)) {
2708 			mddev->in_sync = 1;
2709 			/*
2710 			 * Ensure ->in_sync is visible before we clear
2711 			 * ->sync_checkers.
2712 			 */
2713 			smp_mb();
2714 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2715 			sysfs_notify_dirent_safe(mddev->sysfs_state);
2716 		}
2717 		if (--mddev->sync_checkers == 0)
2718 			percpu_ref_switch_to_percpu(&mddev->writes_pending);
2719 	}
2720 	if (mddev->safemode == 1)
2721 		mddev->safemode = 0;
2722 	return mddev->in_sync;
2723 }
2724 
2725 static void sync_sbs(struct mddev *mddev, int nospares)
2726 {
2727 	/* Update each superblock (in-memory image), but
2728 	 * if we are allowed to, skip spares which already
2729 	 * have the right event counter, or have one earlier
2730 	 * (which would mean they aren't being marked as dirty
2731 	 * with the rest of the array)
2732 	 */
2733 	struct md_rdev *rdev;
2734 	rdev_for_each(rdev, mddev) {
2735 		if (rdev->sb_events == mddev->events ||
2736 		    (nospares &&
2737 		     rdev->raid_disk < 0 &&
2738 		     rdev->sb_events+1 == mddev->events)) {
2739 			/* Don't update this superblock */
2740 			rdev->sb_loaded = 2;
2741 		} else {
2742 			sync_super(mddev, rdev);
2743 			rdev->sb_loaded = 1;
2744 		}
2745 	}
2746 }
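
/*
 * sync_sbs() leaves sb_loaded == 1 on superblocks that md_update_sb()
 * must write out and sb_loaded == 2 on those intentionally skipped.
 */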
2747 
2748 static bool does_sb_need_changing(struct mddev *mddev)
2749 {
2750 	struct md_rdev *rdev = NULL, *iter;
2751 	struct mdp_superblock_1 *sb;
2752 	int role;
2753 
2754 	/* Find a good rdev */
2755 	rdev_for_each(iter, mddev)
2756 		if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
2757 			rdev = iter;
2758 			break;
2759 		}
2760 
2761 	/* No good device found. */
2762 	if (!rdev)
2763 		return false;
2764 
2765 	sb = page_address(rdev->sb_page);
2766 	/* Check if a device has become faulty or a spare become active */
2767 	rdev_for_each(rdev, mddev) {
2768 		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2769 		/* Device activated? */
2770 		if (role == MD_DISK_ROLE_SPARE && rdev->raid_disk >= 0 &&
2771 		    !test_bit(Faulty, &rdev->flags))
2772 			return true;
2773 		/* Device turned faulty? */
2774 		if (test_bit(Faulty, &rdev->flags) && (role < MD_DISK_ROLE_MAX))
2775 			return true;
2776 	}
2777 
2778 	/* Check if any mddev parameters have changed */
2779 	if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2780 	    (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2781 	    (mddev->layout != le32_to_cpu(sb->layout)) ||
2782 	    (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2783 	    (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2784 		return true;
2785 
2786 	return false;
2787 }
2788 
2789 void md_update_sb(struct mddev *mddev, int force_change)
2790 {
2791 	struct md_rdev *rdev;
2792 	int sync_req;
2793 	int nospares = 0;
2794 	int any_badblocks_changed = 0;
2795 	int ret = -1;
2796 
2797 	if (!md_is_rdwr(mddev)) {
2798 		if (force_change)
2799 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2800 		if (!mddev_is_dm(mddev))
2801 			pr_err_ratelimited("%s: can't update sb for read-only array %s\n",
2802 					   __func__, mdname(mddev));
2803 		return;
2804 	}
2805 
2806 repeat:
2807 	if (mddev_is_clustered(mddev)) {
2808 		if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2809 			force_change = 1;
2810 		if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2811 			nospares = 1;
2812 		ret = mddev->cluster_ops->metadata_update_start(mddev);
2813 		/* Has someone else updated the sb? */
2814 		if (!does_sb_need_changing(mddev)) {
2815 			if (ret == 0)
2816 				mddev->cluster_ops->metadata_update_cancel(mddev);
2817 			bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2818 							 BIT(MD_SB_CHANGE_DEVS) |
2819 							 BIT(MD_SB_CHANGE_CLEAN));
2820 			return;
2821 		}
2822 	}
2823 
2824 	/*
2825 	 * First make sure individual recovery_offsets are correct.
2826 	 * curr_resync_completed can only be used during recovery.
2827 	 * During reshape/resync it might use array-addresses rather
2828 	 * than device addresses.
2829 	 */
2830 	rdev_for_each(rdev, mddev) {
2831 		if (rdev->raid_disk >= 0 &&
2832 		    mddev->delta_disks >= 0 &&
2833 		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2834 		    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2835 		    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2836 		    !test_bit(Journal, &rdev->flags) &&
2837 		    !test_bit(In_sync, &rdev->flags) &&
2838 		    mddev->curr_resync_completed > rdev->recovery_offset)
2839 				rdev->recovery_offset = mddev->curr_resync_completed;
2840 
2841 	}
2842 	if (!mddev->persistent) {
2843 		clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2844 		clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2845 		if (!mddev->external) {
2846 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2847 			rdev_for_each(rdev, mddev) {
2848 				if (rdev->badblocks.changed) {
2849 					rdev->badblocks.changed = 0;
2850 					ack_all_badblocks(&rdev->badblocks);
2851 					md_error(mddev, rdev);
2852 				}
2853 				clear_bit(Blocked, &rdev->flags);
2854 				clear_bit(BlockedBadBlocks, &rdev->flags);
2855 				wake_up(&rdev->blocked_wait);
2856 			}
2857 		}
2858 		wake_up(&mddev->sb_wait);
2859 		return;
2860 	}
2861 
2862 	spin_lock(&mddev->lock);
2863 
2864 	mddev->utime = ktime_get_real_seconds();
2865 
2866 	if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2867 		force_change = 1;
2868 	if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2869 		/* just a clean <-> dirty transition, possibly leave spares alone,
2870 		 * though if 'events' isn't the right even/odd, we will have to do
2871 		 * spares after all
2872 		 */
2873 		nospares = 1;
2874 	if (force_change)
2875 		nospares = 0;
2876 	if (mddev->degraded)
2877 		/* If the array is degraded, then skipping spares is both
2878 		 * dangerous and fairly pointless.
2879 		 * Dangerous because a device that was removed from the array
2880 		 * might have an event_count that still looks up-to-date,
2881 		 * so it can be re-added without a resync.
2882 		 * Pointless because if there are any spares to skip,
2883 		 * then a recovery will happen and soon that array won't
2884 		 * be degraded any more and the spare can go back to sleep then.
2885 		 */
2886 		nospares = 0;
2887 
2888 	sync_req = mddev->in_sync;
2889 
2890 	/* If this is just a dirty<->clean transition, and the array is clean
2891 	 * and 'events' is odd, we can roll back to the previous clean state */
2892 	if (nospares
2893 	    && (mddev->in_sync && mddev->resync_offset == MaxSector)
2894 	    && mddev->can_decrease_events
2895 	    && mddev->events != 1) {
2896 		mddev->events--;
2897 		mddev->can_decrease_events = 0;
2898 	} else {
2899 		/* otherwise we have to go forward and ... */
2900 		mddev->events ++;
2901 		mddev->can_decrease_events = nospares;
2902 	}
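	/*
	 * can_decrease_events records that the last bump was only a
	 * clean <-> dirty transition, so one later transition may roll
	 * the counter back instead of advancing it again.
	 */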
2903 
2904 	/*
2905 	 * This 64-bit counter should never wrap.
2906 	 * Either we are around the year ~1 trillion A.D., assuming
2907 	 * 1 reboot per second, or we have a bug...
2908 	 */
2909 	WARN_ON(mddev->events == 0);
2910 
2911 	rdev_for_each(rdev, mddev) {
2912 		if (rdev->badblocks.changed)
2913 			any_badblocks_changed++;
2914 		if (test_bit(Faulty, &rdev->flags))
2915 			set_bit(FaultRecorded, &rdev->flags);
2916 	}
2917 
2918 	sync_sbs(mddev, nospares);
2919 	spin_unlock(&mddev->lock);
2920 
2921 	pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2922 		 mdname(mddev), mddev->in_sync);
2923 
2924 	mddev_add_trace_msg(mddev, "md md_update_sb");
2925 rewrite:
2926 	if (md_bitmap_enabled(mddev, false))
2927 		mddev->bitmap_ops->update_sb(mddev->bitmap);
2928 	rdev_for_each(rdev, mddev) {
2929 		if (rdev->sb_loaded != 1)
2930 			continue; /* no noise on spare devices */
2931 
2932 		if (!test_bit(Faulty, &rdev->flags)) {
2933 			md_write_metadata(mddev, rdev, rdev->sb_start,
2934 					  rdev->sb_size, rdev->sb_page, 0);
2935 			pr_debug("md: (write) %pg's sb offset: %llu\n",
2936 				 rdev->bdev,
2937 				 (unsigned long long)rdev->sb_start);
2938 			rdev->sb_events = mddev->events;
2939 			if (rdev->badblocks.size) {
2940 				md_write_metadata(mddev, rdev,
2941 						  rdev->badblocks.sector,
2942 						  rdev->badblocks.size << 9,
2943 						  rdev->bb_page, 0);
2944 				rdev->badblocks.size = 0;
2945 			}
2946 
2947 		} else
2948 			pr_debug("md: %pg (skipping faulty)\n",
2949 				 rdev->bdev);
2950 	}
2951 	if (md_super_wait(mddev) < 0)
2952 		goto rewrite;
2953 	/* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
2954 
2955 	if (mddev_is_clustered(mddev) && ret == 0)
2956 		mddev->cluster_ops->metadata_update_finish(mddev);
2957 
2958 	if (mddev->in_sync != sync_req ||
2959 	    !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2960 			       BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
2961 		/* have to write it out again */
2962 		goto repeat;
2963 	wake_up(&mddev->sb_wait);
2964 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2965 		sysfs_notify_dirent_safe(mddev->sysfs_completed);
2966 
2967 	rdev_for_each(rdev, mddev) {
2968 		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2969 			clear_bit(Blocked, &rdev->flags);
2970 
2971 		if (any_badblocks_changed)
2972 			ack_all_badblocks(&rdev->badblocks);
2973 		clear_bit(BlockedBadBlocks, &rdev->flags);
2974 		wake_up(&rdev->blocked_wait);
2975 	}
2976 }
2977 EXPORT_SYMBOL(md_update_sb);
2978 
2979 static int add_bound_rdev(struct md_rdev *rdev)
2980 {
2981 	struct mddev *mddev = rdev->mddev;
2982 	int err = 0;
2983 	bool add_journal = test_bit(Journal, &rdev->flags);
2984 
2985 	if (!mddev->pers->hot_remove_disk || add_journal) {
2986 		/* If there is hot_add_disk but no hot_remove_disk
2987 		 * then added disks for geometry changes,
2988 		 * and should be added immediately.
2989 		 */
2990 		super_types[mddev->major_version].
2991 			validate_super(mddev, NULL/*freshest*/, rdev);
2992 		err = mddev->pers->hot_add_disk(mddev, rdev);
2993 		if (err) {
2994 			md_kick_rdev_from_array(rdev);
2995 			return err;
2996 		}
2997 	}
2998 	sysfs_notify_dirent_safe(rdev->sysfs_state);
2999 
3000 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
3001 	if (mddev->degraded)
3002 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3003 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3004 	md_new_event();
3005 	return 0;
3006 }
3007 
3008 /* words written to sysfs files may, or may not, be \n terminated.
3009  * We want to accept either way. For this we use cmd_match.
3010  */
3011 static int cmd_match(const char *cmd, const char *str)
3012 {
3013 	/* See if cmd, written into a sysfs file, matches
3014 	 * str.  They must either be the same, or cmd can
3015 	 * have a trailing newline
3016 	 */
3017 	while (*cmd && *str && *cmd == *str) {
3018 		cmd++;
3019 		str++;
3020 	}
3021 	if (*cmd == '\n')
3022 		cmd++;
3023 	if (*str || *cmd)
3024 		return 0;
3025 	return 1;
3026 }
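
/*
 * So cmd_match("faulty\n", "faulty") and cmd_match("faulty", "faulty")
 * both match, while a bare prefix such as "fault" does not.
 */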
3027 
3028 struct rdev_sysfs_entry {
3029 	struct attribute attr;
3030 	ssize_t (*show)(struct md_rdev *, char *);
3031 	ssize_t (*store)(struct md_rdev *, const char *, size_t);
3032 };
3033 
3034 static ssize_t
3035 state_show(struct md_rdev *rdev, char *page)
3036 {
3037 	char *sep = ",";
3038 	size_t len = 0;
3039 	unsigned long flags = READ_ONCE(rdev->flags);
3040 
3041 	if (test_bit(Faulty, &flags) ||
3042 	    (!test_bit(ExternalBbl, &flags) &&
3043 	    rdev->badblocks.unacked_exist))
3044 		len += sprintf(page+len, "faulty%s", sep);
3045 	if (test_bit(In_sync, &flags))
3046 		len += sprintf(page+len, "in_sync%s", sep);
3047 	if (test_bit(Journal, &flags))
3048 		len += sprintf(page+len, "journal%s", sep);
3049 	if (test_bit(WriteMostly, &flags))
3050 		len += sprintf(page+len, "write_mostly%s", sep);
3051 	if (test_bit(Blocked, &flags) ||
3052 	    (rdev->badblocks.unacked_exist
3053 	     && !test_bit(Faulty, &flags)))
3054 		len += sprintf(page+len, "blocked%s", sep);
3055 	if (!test_bit(Faulty, &flags) &&
3056 	    !test_bit(Journal, &flags) &&
3057 	    !test_bit(In_sync, &flags))
3058 		len += sprintf(page+len, "spare%s", sep);
3059 	if (test_bit(WriteErrorSeen, &flags))
3060 		len += sprintf(page+len, "write_error%s", sep);
3061 	if (test_bit(WantReplacement, &flags))
3062 		len += sprintf(page+len, "want_replacement%s", sep);
3063 	if (test_bit(Replacement, &flags))
3064 		len += sprintf(page+len, "replacement%s", sep);
3065 	if (test_bit(ExternalBbl, &flags))
3066 		len += sprintf(page+len, "external_bbl%s", sep);
3067 	if (test_bit(FailFast, &flags))
3068 		len += sprintf(page+len, "failfast%s", sep);
3069 
3070 	if (len)
3071 		len -= strlen(sep);
3072 
3073 	return len+sprintf(page+len, "\n");
3074 }
3075 
3076 static ssize_t
3077 state_store(struct md_rdev *rdev, const char *buf, size_t len)
3078 {
3079 	/* can write
3080 	 *  faulty  - simulates an error
3081 	 *  remove  - disconnects the device
3082 	 *  writemostly - sets write_mostly
3083 	 *  -writemostly - clears write_mostly
3084 	 *  blocked - sets the Blocked flags
3085 	 *  -blocked - clears the Blocked and possibly simulates an error
3086 	 *  insync - sets Insync providing device isn't active
3087 	 *  -insync - clear Insync for a device with a slot assigned,
3088 	 *            so that it gets rebuilt based on bitmap
3089 	 *  write_error - sets WriteErrorSeen
3090 	 *  -write_error - clears WriteErrorSeen
3091 	 *  {,-}failfast - set/clear FailFast
3092 	 */
3093 
3094 	struct mddev *mddev = rdev->mddev;
3095 	int err = -EINVAL;
3096 	bool need_update_sb = false;
3097 
3098 	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
3099 		md_error(rdev->mddev, rdev);
3100 
3101 		if (test_bit(MD_BROKEN, &rdev->mddev->flags))
3102 			err = -EBUSY;
3103 		else
3104 			err = 0;
3105 	} else if (cmd_match(buf, "remove")) {
3106 		if (rdev->mddev->pers) {
3107 			clear_bit(Blocked, &rdev->flags);
3108 			remove_and_add_spares(rdev->mddev, rdev);
3109 		}
3110 		if (rdev->raid_disk >= 0)
3111 			err = -EBUSY;
3112 		else {
3113 			err = 0;
3114 			if (mddev_is_clustered(mddev))
3115 				err = mddev->cluster_ops->remove_disk(mddev, rdev);
3116 
3117 			if (err == 0) {
3118 				md_kick_rdev_from_array(rdev);
3119 				if (mddev->pers)
3120 					set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
3121 				md_new_event();
3122 			}
3123 		}
3124 	} else if (cmd_match(buf, "writemostly")) {
3125 		set_bit(WriteMostly, &rdev->flags);
3126 		mddev_create_serial_pool(rdev->mddev, rdev);
3127 		need_update_sb = true;
3128 		err = 0;
3129 	} else if (cmd_match(buf, "-writemostly")) {
3130 		mddev_destroy_serial_pool(rdev->mddev, rdev);
3131 		clear_bit(WriteMostly, &rdev->flags);
3132 		need_update_sb = true;
3133 		err = 0;
3134 	} else if (cmd_match(buf, "blocked")) {
3135 		set_bit(Blocked, &rdev->flags);
3136 		err = 0;
3137 	} else if (cmd_match(buf, "-blocked")) {
3138 		if (!test_bit(Faulty, &rdev->flags) &&
3139 		    !test_bit(ExternalBbl, &rdev->flags) &&
3140 		    rdev->badblocks.unacked_exist) {
3141 			/* metadata handler doesn't understand badblocks,
3142 			 * so we need to fail the device
3143 			 */
3144 			md_error(rdev->mddev, rdev);
3145 		}
3146 		clear_bit(Blocked, &rdev->flags);
3147 		clear_bit(BlockedBadBlocks, &rdev->flags);
3148 		wake_up(&rdev->blocked_wait);
3149 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3150 
3151 		err = 0;
3152 	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3153 		set_bit(In_sync, &rdev->flags);
3154 		err = 0;
3155 	} else if (cmd_match(buf, "failfast")) {
3156 		set_bit(FailFast, &rdev->flags);
3157 		need_update_sb = true;
3158 		err = 0;
3159 	} else if (cmd_match(buf, "-failfast")) {
3160 		clear_bit(FailFast, &rdev->flags);
3161 		need_update_sb = true;
3162 		err = 0;
3163 	} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3164 		   !test_bit(Journal, &rdev->flags)) {
3165 		if (rdev->mddev->pers == NULL) {
3166 			clear_bit(In_sync, &rdev->flags);
3167 			rdev->saved_raid_disk = rdev->raid_disk;
3168 			rdev->raid_disk = -1;
3169 			err = 0;
3170 		}
3171 	} else if (cmd_match(buf, "write_error")) {
3172 		set_bit(WriteErrorSeen, &rdev->flags);
3173 		err = 0;
3174 	} else if (cmd_match(buf, "-write_error")) {
3175 		clear_bit(WriteErrorSeen, &rdev->flags);
3176 		err = 0;
3177 	} else if (cmd_match(buf, "want_replacement")) {
3178 		/* Any non-spare device that is not a replacement can
3179 		 * become want_replacement at any time, but we then need to
3180 		 * check if recovery is needed.
3181 		 */
3182 		if (rdev->raid_disk >= 0 &&
3183 		    !test_bit(Journal, &rdev->flags) &&
3184 		    !test_bit(Replacement, &rdev->flags))
3185 			set_bit(WantReplacement, &rdev->flags);
3186 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3187 		err = 0;
3188 	} else if (cmd_match(buf, "-want_replacement")) {
3189 		/* Clearing 'want_replacement' is always allowed.
3190 		 * Once replacement starts it is too late though.
3191 		 */
3192 		err = 0;
3193 		clear_bit(WantReplacement, &rdev->flags);
3194 	} else if (cmd_match(buf, "replacement")) {
3195 		/* Can only set a device as a replacement when array has not
3196 		 * yet been started.  Once running, replacement is automatic
3197 		 * from spares, or by assigning 'slot'.
3198 		 */
3199 		if (rdev->mddev->pers)
3200 			err = -EBUSY;
3201 		else {
3202 			set_bit(Replacement, &rdev->flags);
3203 			err = 0;
3204 		}
3205 	} else if (cmd_match(buf, "-replacement")) {
3206 		/* Similarly, can only clear Replacement before start */
3207 		if (rdev->mddev->pers)
3208 			err = -EBUSY;
3209 		else {
3210 			clear_bit(Replacement, &rdev->flags);
3211 			err = 0;
3212 		}
3213 	} else if (cmd_match(buf, "re-add")) {
3214 		if (!rdev->mddev->pers)
3215 			err = -EINVAL;
3216 		else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3217 				rdev->saved_raid_disk >= 0) {
3218 			/* clear_bit is performed _after_ all the devices
3219 			 * have their local Faulty bit cleared. If any writes
3220 			 * happen in the meantime in the local node, they
3221 			 * will land in the local bitmap, which will be synced
3222 			 * by this node eventually
3223 			 */
3224 			if (!mddev_is_clustered(rdev->mddev) ||
3225 			    (err = mddev->cluster_ops->gather_bitmaps(rdev)) == 0) {
3226 				clear_bit(Faulty, &rdev->flags);
3227 				err = add_bound_rdev(rdev);
3228 			}
3229 		} else
3230 			err = -EBUSY;
3231 	} else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3232 		set_bit(ExternalBbl, &rdev->flags);
3233 		rdev->badblocks.shift = 0;
3234 		err = 0;
3235 	} else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3236 		clear_bit(ExternalBbl, &rdev->flags);
3237 		err = 0;
3238 	}
3239 	if (need_update_sb)
3240 		md_update_sb(mddev, 1);
3241 	if (!err)
3242 		sysfs_notify_dirent_safe(rdev->sysfs_state);
3243 	return err ? err : len;
3244 }
3245 static struct rdev_sysfs_entry rdev_state =
3246 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
3247 
3248 static ssize_t
3249 errors_show(struct md_rdev *rdev, char *page)
3250 {
3251 	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3252 }
3253 
3254 static ssize_t
3255 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
3256 {
3257 	unsigned int n;
3258 	int rv;
3259 
3260 	rv = kstrtouint(buf, 10, &n);
3261 	if (rv < 0)
3262 		return rv;
3263 	atomic_set(&rdev->corrected_errors, n);
3264 	return len;
3265 }
3266 static struct rdev_sysfs_entry rdev_errors =
3267 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
3268 
3269 static ssize_t
3270 slot_show(struct md_rdev *rdev, char *page)
3271 {
3272 	if (test_bit(Journal, &rdev->flags))
3273 		return sprintf(page, "journal\n");
3274 	else if (rdev->raid_disk < 0)
3275 		return sprintf(page, "none\n");
3276 	else
3277 		return sprintf(page, "%d\n", rdev->raid_disk);
3278 }
3279 
3280 static ssize_t
3281 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
3282 {
3283 	int slot;
3284 	int err;
3285 
3286 	if (test_bit(Journal, &rdev->flags))
3287 		return -EBUSY;
3288 	if (strncmp(buf, "none", 4)==0)
3289 		slot = -1;
3290 	else {
3291 		err = kstrtouint(buf, 10, (unsigned int *)&slot);
3292 		if (err < 0)
3293 			return err;
3294 		if (slot < 0)
3295 			/* overflow */
3296 			return -ENOSPC;
3297 	}
3298 	if (rdev->mddev->pers && slot == -1) {
3299 		/* Setting 'slot' on an active array requires also
3300 		 * updating the 'rd%d' link, and communicating
3301 		 * with the personality with ->hot_*_disk.
3302 		 * For now we only support removing
3303 		 * failed/spare devices.  This normally happens automatically,
3304 		 * but not when the metadata is externally managed.
3305 		 */
3306 		if (rdev->raid_disk == -1)
3307 			return -EEXIST;
3308 		/* personality does all needed checks */
3309 		if (rdev->mddev->pers->hot_remove_disk == NULL)
3310 			return -EINVAL;
3311 		clear_bit(Blocked, &rdev->flags);
3312 		remove_and_add_spares(rdev->mddev, rdev);
3313 		if (rdev->raid_disk >= 0)
3314 			return -EBUSY;
3315 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3316 	} else if (rdev->mddev->pers) {
3317 		/* Activating a spare .. or possibly reactivating
3318 		 * if we ever get bitmaps working here.
3319 		 */
3320 		int err;
3321 
3322 		if (rdev->raid_disk != -1)
3323 			return -EBUSY;
3324 
3325 		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3326 			return -EBUSY;
3327 
3328 		if (rdev->mddev->pers->hot_add_disk == NULL)
3329 			return -EINVAL;
3330 
3331 		if (slot >= rdev->mddev->raid_disks &&
3332 		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3333 			return -ENOSPC;
3334 
3335 		rdev->raid_disk = slot;
3336 		if (test_bit(In_sync, &rdev->flags))
3337 			rdev->saved_raid_disk = slot;
3338 		else
3339 			rdev->saved_raid_disk = -1;
3340 		clear_bit(In_sync, &rdev->flags);
3341 		clear_bit(Bitmap_sync, &rdev->flags);
3342 		err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
3343 		if (err) {
3344 			rdev->raid_disk = -1;
3345 			return err;
3346 		} else
3347 			sysfs_notify_dirent_safe(rdev->sysfs_state);
3348 		/* failure here is OK */;
3349 		sysfs_link_rdev(rdev->mddev, rdev);
3350 		/* don't wakeup anyone, leave that to userspace. */
3351 	} else {
3352 		if (slot >= rdev->mddev->raid_disks &&
3353 		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3354 			return -ENOSPC;
3355 		rdev->raid_disk = slot;
3356 		/* assume it is working */
3357 		clear_bit(Faulty, &rdev->flags);
3358 		clear_bit(WriteMostly, &rdev->flags);
3359 		set_bit(In_sync, &rdev->flags);
3360 		sysfs_notify_dirent_safe(rdev->sysfs_state);
3361 	}
3362 	return len;
3363 }
3364 
3365 static struct rdev_sysfs_entry rdev_slot =
3366 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
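
/*
 * Usage sketch (illustrative only; the device path is hypothetical):
 * userspace can detach a failed or spare device from an active array
 * by writing "none" to this attribute, or park an inactive device in
 * a slot by writing a number.
 *
 *	int fd = open("/sys/block/md0/md/dev-sda/slot", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "none", 4);	// remove a failed/spare device
 *		close(fd);
 *	}
 */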
3367 
3368 static ssize_t
3369 offset_show(struct md_rdev *rdev, char *page)
3370 {
3371 	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
3372 }
3373 
3374 static ssize_t
3375 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
3376 {
3377 	unsigned long long offset;
3378 	if (kstrtoull(buf, 10, &offset) < 0)
3379 		return -EINVAL;
3380 	if (rdev->mddev->pers && rdev->raid_disk >= 0)
3381 		return -EBUSY;
3382 	if (rdev->sectors && rdev->mddev->external)
3383 		/* Must set offset before size, so overlap checks
3384 		 * can be sane */
3385 		return -EBUSY;
3386 	rdev->data_offset = offset;
3387 	rdev->new_data_offset = offset;
3388 	return len;
3389 }
3390 
3391 static struct rdev_sysfs_entry rdev_offset =
3392 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
3393 
3394 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3395 {
3396 	return sprintf(page, "%llu\n",
3397 		       (unsigned long long)rdev->new_data_offset);
3398 }
3399 
3400 static ssize_t new_offset_store(struct md_rdev *rdev,
3401 				const char *buf, size_t len)
3402 {
3403 	unsigned long long new_offset;
3404 	struct mddev *mddev = rdev->mddev;
3405 
3406 	if (kstrtoull(buf, 10, &new_offset) < 0)
3407 		return -EINVAL;
3408 
3409 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3410 		return -EBUSY;
3411 	if (new_offset == rdev->data_offset)
3412 		/* reset is always permitted */
3413 		;
3414 	else if (new_offset > rdev->data_offset) {
3415 		/* must not push array size beyond rdev_sectors */
3416 		if (new_offset - rdev->data_offset
3417 		    + mddev->dev_sectors > rdev->sectors)
3418 				return -E2BIG;
3419 	}
3420 	/* Metadata worries about other space details. */
3421 
3422 	/* decreasing the offset is inconsistent with a backwards
3423 	 * reshape.
3424 	 */
3425 	if (new_offset < rdev->data_offset &&
3426 	    mddev->reshape_backwards)
3427 		return -EINVAL;
3428 	/* Increasing offset is inconsistent with forwards
3429 	 * reshape.  reshape_direction should be set to
3430 	 * 'backwards' first.
3431 	 */
3432 	if (new_offset > rdev->data_offset &&
3433 	    !mddev->reshape_backwards)
3434 		return -EINVAL;
3435 
3436 	if (mddev->pers && mddev->persistent &&
3437 	    !super_types[mddev->major_version]
3438 	    .allow_new_offset(rdev, new_offset))
3439 		return -E2BIG;
3440 	rdev->new_data_offset = new_offset;
3441 	if (new_offset > rdev->data_offset)
3442 		mddev->reshape_backwards = 1;
3443 	else if (new_offset < rdev->data_offset)
3444 		mddev->reshape_backwards = 0;
3445 
3446 	return len;
3447 }
3448 static struct rdev_sysfs_entry rdev_new_offset =
3449 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
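
/*
 * Worked example (illustrative, hypothetical values): the relation of
 * new_offset to data_offset selects the reshape direction.  With
 * data_offset == 2048, writing "4096" sets reshape_backwards = 1,
 * writing "1024" clears it, and writing "2048" resets the offset with
 * no side effects.
 */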
3450 
3451 static ssize_t
3452 rdev_size_show(struct md_rdev *rdev, char *page)
3453 {
3454 	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
3455 }
3456 
3457 static bool md_rdevs_overlap(struct md_rdev *a, struct md_rdev *b)
3458 {
3459 	/* check if two start/length pairs overlap */
3460 	if (a->data_offset + a->sectors <= b->data_offset)
3461 		return false;
3462 	if (b->data_offset + b->sectors <= a->data_offset)
3463 		return false;
3464 	return true;
3465 }
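
/*
 * Worked example (illustrative, hypothetical values): ranges that
 * merely touch are not considered overlapping.
 *
 *	struct md_rdev a = { .data_offset = 0,    .sectors = 1024 };
 *	struct md_rdev b = { .data_offset = 1024, .sectors = 1024 };
 *	md_rdevs_overlap(&a, &b);	// false: [0,1024) and [1024,2048)
 *	b.data_offset = 1023;
 *	md_rdevs_overlap(&a, &b);	// true: both ranges cover sector 1023
 */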
3466 
3467 static bool md_rdev_overlaps(struct md_rdev *rdev)
3468 {
3469 	struct mddev *mddev;
3470 	struct md_rdev *rdev2;
3471 
3472 	spin_lock(&all_mddevs_lock);
3473 	list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
3474 		if (test_bit(MD_DELETED, &mddev->flags))
3475 			continue;
3476 		rdev_for_each(rdev2, mddev) {
3477 			if (rdev != rdev2 && rdev->bdev == rdev2->bdev &&
3478 			    md_rdevs_overlap(rdev, rdev2)) {
3479 				spin_unlock(&all_mddevs_lock);
3480 				return true;
3481 			}
3482 		}
3483 	}
3484 	spin_unlock(&all_mddevs_lock);
3485 	return false;
3486 }
3487 
3488 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3489 {
3490 	unsigned long long blocks;
3491 	sector_t new;
3492 
3493 	if (kstrtoull(buf, 10, &blocks) < 0)
3494 		return -EINVAL;
3495 
3496 	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3497 		return -EINVAL; /* sector conversion overflow */
3498 
3499 	new = blocks * 2;
3500 	if (new != blocks * 2)
3501 		return -EINVAL; /* unsigned long long to sector_t overflow */
3502 
3503 	*sectors = new;
3504 	return 0;
3505 }
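
/*
 * Worked example (illustrative): sizes arrive in 1KiB blocks and are
 * stored as 512-byte sectors, so the parsed value is doubled; inputs
 * with the top bit set are rejected before the doubling can overflow.
 *
 *	sector_t s;
 *	strict_blocks_to_sectors("1024", &s);	// s = 2048 sectors (1MiB)
 *	strict_blocks_to_sectors("9223372036854775808", &s);	// -EINVAL
 */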
3506 
3507 static ssize_t
3508 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3509 {
3510 	struct mddev *my_mddev = rdev->mddev;
3511 	sector_t oldsectors = rdev->sectors;
3512 	sector_t sectors;
3513 
3514 	if (test_bit(Journal, &rdev->flags))
3515 		return -EBUSY;
3516 	if (strict_blocks_to_sectors(buf, &sectors) < 0)
3517 		return -EINVAL;
3518 	if (rdev->data_offset != rdev->new_data_offset)
3519 		return -EINVAL; /* too confusing */
3520 	if (my_mddev->pers && rdev->raid_disk >= 0) {
3521 		if (my_mddev->persistent) {
3522 			sectors = super_types[my_mddev->major_version].
3523 				rdev_size_change(rdev, sectors);
3524 			if (!sectors)
3525 				return -EBUSY;
3526 		} else if (!sectors)
3527 			sectors = bdev_nr_sectors(rdev->bdev) -
3528 				rdev->data_offset;
3529 		if (!my_mddev->pers->resize)
3530 			/* Cannot change size for RAID0 or Linear etc */
3531 			return -EINVAL;
3532 	}
3533 	if (sectors < my_mddev->dev_sectors)
3534 		return -EINVAL; /* component must fit device */
3535 
3536 	rdev->sectors = sectors;
3537 
3538 	/*
3539 	 * Check that all other rdevs with the same bdev do not overlap.  This
3540 	 * check does not provide a hard guarantee, it just helps avoid
3541 	 * dangerous mistakes.
3542 	 */
3543 	if (sectors > oldsectors && my_mddev->external &&
3544 	    md_rdev_overlaps(rdev)) {
3545 		/*
3546 		 * Someone else could have slipped in a size change here, but
3547 		 * doing so is just silly.  We put oldsectors back because we
3548 		 * know it is safe, and trust userspace not to race with itself.
3549 		 */
3550 		rdev->sectors = oldsectors;
3551 		return -EBUSY;
3552 	}
3553 	return len;
3554 }
3555 
3556 static struct rdev_sysfs_entry rdev_size =
3557 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3558 
3559 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3560 {
3561 	unsigned long long recovery_start = rdev->recovery_offset;
3562 
3563 	if (test_bit(In_sync, &rdev->flags) ||
3564 	    recovery_start == MaxSector)
3565 		return sprintf(page, "none\n");
3566 
3567 	return sprintf(page, "%llu\n", recovery_start);
3568 }
3569 
3570 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3571 {
3572 	unsigned long long recovery_start;
3573 
3574 	if (cmd_match(buf, "none"))
3575 		recovery_start = MaxSector;
3576 	else if (kstrtoull(buf, 10, &recovery_start))
3577 		return -EINVAL;
3578 
3579 	if (rdev->mddev->pers &&
3580 	    rdev->raid_disk >= 0)
3581 		return -EBUSY;
3582 
3583 	rdev->recovery_offset = recovery_start;
3584 	if (recovery_start == MaxSector)
3585 		set_bit(In_sync, &rdev->flags);
3586 	else
3587 		clear_bit(In_sync, &rdev->flags);
3588 	return len;
3589 }
3590 
3591 static struct rdev_sysfs_entry rdev_recovery_start =
3592 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3593 
3594 /* sysfs access to bad-blocks list.
3595  * We present two files.
3596  * 'bad-blocks' lists sector numbers and lengths of ranges that
3597  *    are recorded as bad.  The list is truncated to fit within
3598  *    the one-page limit of sysfs.
3599  *    Writing "sector length" to this file adds an acknowledged
3600  *    bad block to the list.
3601  * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
3602  *    been acknowledged.  Writing to this file adds bad blocks
3603  *    without acknowledging them.  This is largely for testing.
3604  */
3605 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3606 {
3607 	return badblocks_show(&rdev->badblocks, page, 0);
3608 }
3609 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3610 {
3611 	int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3612 	/* Maybe that ack was all we needed */
3613 	if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3614 		wake_up(&rdev->blocked_wait);
3615 	return rv;
3616 }
3617 static struct rdev_sysfs_entry rdev_bad_blocks =
3618 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
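
/*
 * Usage sketch (illustrative only; the sysfs path and numbers are
 * hypothetical): a userspace agent acknowledges a bad range by
 * writing "sector length" to this attribute.
 *
 *	int fd = open("/sys/block/md0/md/dev-sda/bad_blocks", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "2096896 8", 9);	// 8 sectors bad at 2096896
 *		close(fd);
 *	}
 */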
3619 
3620 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3621 {
3622 	return badblocks_show(&rdev->badblocks, page, 1);
3623 }
3624 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3625 {
3626 	return badblocks_store(&rdev->badblocks, page, len, 1);
3627 }
3628 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3629 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3630 
3631 static ssize_t
3632 ppl_sector_show(struct md_rdev *rdev, char *page)
3633 {
3634 	return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3635 }
3636 
3637 static ssize_t
3638 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3639 {
3640 	unsigned long long sector;
3641 
3642 	if (kstrtoull(buf, 10, &sector) < 0)
3643 		return -EINVAL;
3644 	if (sector != (sector_t)sector)
3645 		return -EINVAL;
3646 
3647 	if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3648 	    rdev->raid_disk >= 0)
3649 		return -EBUSY;
3650 
3651 	if (rdev->mddev->persistent) {
3652 		if (rdev->mddev->major_version == 0)
3653 			return -EINVAL;
3654 		if ((sector > rdev->sb_start &&
3655 		     sector - rdev->sb_start > S16_MAX) ||
3656 		    (sector < rdev->sb_start &&
3657 		     rdev->sb_start - sector > -S16_MIN))
3658 			return -EINVAL;
3659 		rdev->ppl.offset = sector - rdev->sb_start;
3660 	} else if (!rdev->mddev->external) {
3661 		return -EBUSY;
3662 	}
3663 	rdev->ppl.sector = sector;
3664 	return len;
3665 }
3666 
3667 static struct rdev_sysfs_entry rdev_ppl_sector =
3668 __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3669 
3670 static ssize_t
3671 ppl_size_show(struct md_rdev *rdev, char *page)
3672 {
3673 	return sprintf(page, "%u\n", rdev->ppl.size);
3674 }
3675 
3676 static ssize_t
3677 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3678 {
3679 	unsigned int size;
3680 
3681 	if (kstrtouint(buf, 10, &size) < 0)
3682 		return -EINVAL;
3683 
3684 	if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3685 	    rdev->raid_disk >= 0)
3686 		return -EBUSY;
3687 
3688 	if (rdev->mddev->persistent) {
3689 		if (rdev->mddev->major_version == 0)
3690 			return -EINVAL;
3691 		if (size > U16_MAX)
3692 			return -EINVAL;
3693 	} else if (!rdev->mddev->external) {
3694 		return -EBUSY;
3695 	}
3696 	rdev->ppl.size = size;
3697 	return len;
3698 }
3699 
3700 static struct rdev_sysfs_entry rdev_ppl_size =
3701 __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3702 
3703 static struct attribute *rdev_default_attrs[] = {
3704 	&rdev_state.attr,
3705 	&rdev_errors.attr,
3706 	&rdev_slot.attr,
3707 	&rdev_offset.attr,
3708 	&rdev_new_offset.attr,
3709 	&rdev_size.attr,
3710 	&rdev_recovery_start.attr,
3711 	&rdev_bad_blocks.attr,
3712 	&rdev_unack_bad_blocks.attr,
3713 	&rdev_ppl_sector.attr,
3714 	&rdev_ppl_size.attr,
3715 	NULL,
3716 };
3717 ATTRIBUTE_GROUPS(rdev_default);
3718 static ssize_t
3719 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3720 {
3721 	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3722 	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3723 
3724 	if (!entry->show)
3725 		return -EIO;
3726 	if (!rdev->mddev)
3727 		return -ENODEV;
3728 	return entry->show(rdev, page);
3729 }
3730 
3731 static ssize_t
3732 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3733 	      const char *page, size_t length)
3734 {
3735 	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3736 	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3737 	struct kernfs_node *kn = NULL;
3738 	bool suspend = false;
3739 	ssize_t rv;
3740 	struct mddev *mddev = READ_ONCE(rdev->mddev);
3741 
3742 	if (!entry->store)
3743 		return -EIO;
3744 	if (!capable(CAP_SYS_ADMIN))
3745 		return -EACCES;
3746 	if (!mddev)
3747 		return -ENODEV;
3748 
3749 	if (entry->store == state_store) {
3750 		if (cmd_match(page, "remove"))
3751 			kn = sysfs_break_active_protection(kobj, attr);
3752 		if (cmd_match(page, "remove") || cmd_match(page, "re-add") ||
3753 		    cmd_match(page, "writemostly") ||
3754 		    cmd_match(page, "-writemostly"))
3755 			suspend = true;
3756 	}
3757 
3758 	rv = suspend ? mddev_suspend_and_lock(mddev) : mddev_lock(mddev);
3759 	if (!rv) {
3760 		if (rdev->mddev == NULL)
3761 			rv = -ENODEV;
3762 		else
3763 			rv = entry->store(rdev, page, length);
3764 		suspend ? mddev_unlock_and_resume(mddev) : mddev_unlock(mddev);
3765 	}
3766 
3767 	if (kn)
3768 		sysfs_unbreak_active_protection(kn);
3769 
3770 	return rv;
3771 }
3772 
3773 static void rdev_free(struct kobject *ko)
3774 {
3775 	struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3776 	kfree(rdev);
3777 }
3778 static const struct sysfs_ops rdev_sysfs_ops = {
3779 	.show		= rdev_attr_show,
3780 	.store		= rdev_attr_store,
3781 };
3782 static const struct kobj_type rdev_ktype = {
3783 	.release	= rdev_free,
3784 	.sysfs_ops	= &rdev_sysfs_ops,
3785 	.default_groups	= rdev_default_groups,
3786 };
3787 
3788 int md_rdev_init(struct md_rdev *rdev)
3789 {
3790 	rdev->desc_nr = -1;
3791 	rdev->saved_raid_disk = -1;
3792 	rdev->raid_disk = -1;
3793 	rdev->flags = 0;
3794 	rdev->data_offset = 0;
3795 	rdev->new_data_offset = 0;
3796 	rdev->sb_events = 0;
3797 	rdev->last_read_error = 0;
3798 	rdev->sb_loaded = 0;
3799 	rdev->bb_page = NULL;
3800 	atomic_set(&rdev->nr_pending, 0);
3801 	atomic_set(&rdev->read_errors, 0);
3802 	atomic_set(&rdev->corrected_errors, 0);
3803 
3804 	INIT_LIST_HEAD(&rdev->same_set);
3805 	init_waitqueue_head(&rdev->blocked_wait);
3806 
3807 	/* Add space to store bad block list.
3808 	 * This reserves the space even on arrays where it cannot
3809 	 * be used - I wonder if that matters
3810 	 */
3811 	return badblocks_init(&rdev->badblocks, 0);
3812 }
3813 EXPORT_SYMBOL_GPL(md_rdev_init);
3814 
3815 /*
3816  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3817  *
3818  * mark the device faulty if:
3819  *
3820  *   - the device is nonexistent (zero size)
3821  *   - the device has no valid superblock
3822  *
3823  * a faulty rdev _never_ has rdev->sb set.
3824  */
3825 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3826 {
3827 	struct md_rdev *rdev;
3828 	sector_t size;
3829 	int err;
3830 
3831 	rdev = kzalloc_obj(*rdev);
3832 	if (!rdev)
3833 		return ERR_PTR(-ENOMEM);
3834 
3835 	err = md_rdev_init(rdev);
3836 	if (err)
3837 		goto out_free_rdev;
3838 	err = alloc_disk_sb(rdev);
3839 	if (err)
3840 		goto out_clear_rdev;
3841 
3842 	rdev->bdev_file = bdev_file_open_by_dev(newdev,
3843 			BLK_OPEN_READ | BLK_OPEN_WRITE,
3844 			super_format == -2 ? &claim_rdev : rdev, NULL);
3845 	if (IS_ERR(rdev->bdev_file)) {
3846 		pr_warn("md: could not open device unknown-block(%u,%u).\n",
3847 			MAJOR(newdev), MINOR(newdev));
3848 		err = PTR_ERR(rdev->bdev_file);
3849 		goto out_clear_rdev;
3850 	}
3851 	rdev->bdev = file_bdev(rdev->bdev_file);
3852 
3853 	kobject_init(&rdev->kobj, &rdev_ktype);
3854 
3855 	size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
3856 	if (!size) {
3857 		pr_warn("md: %pg has zero or unknown size, marking faulty!\n",
3858 			rdev->bdev);
3859 		err = -EINVAL;
3860 		goto out_blkdev_put;
3861 	}
3862 
3863 	if (super_format >= 0) {
3864 		err = super_types[super_format].
3865 			load_super(rdev, NULL, super_minor);
3866 		if (err == -EINVAL) {
3867 			pr_warn("md: %pg does not have a valid v%d.%d superblock, not importing!\n",
3868 				rdev->bdev,
3869 				super_format, super_minor);
3870 			goto out_blkdev_put;
3871 		}
3872 		if (err < 0) {
3873 			pr_warn("md: could not read %pg's sb, not importing!\n",
3874 				rdev->bdev);
3875 			goto out_blkdev_put;
3876 		}
3877 	}
3878 
3879 	return rdev;
3880 
3881 out_blkdev_put:
3882 	fput(rdev->bdev_file);
3883 out_clear_rdev:
3884 	md_rdev_clear(rdev);
3885 out_free_rdev:
3886 	kfree(rdev);
3887 	return ERR_PTR(err);
3888 }
3889 
3890 /*
3891  * Check a full RAID array for plausibility
3892  */
3893 
3894 static int analyze_sbs(struct mddev *mddev)
3895 {
3896 	struct md_rdev *rdev, *freshest, *tmp;
3897 
3898 	freshest = NULL;
3899 	rdev_for_each_safe(rdev, tmp, mddev)
3900 		switch (super_types[mddev->major_version].
3901 			load_super(rdev, freshest, mddev->minor_version)) {
3902 		case 1:
3903 			freshest = rdev;
3904 			break;
3905 		case 0:
3906 			break;
3907 		default:
3908 			pr_warn("md: fatal superblock inconsistency in %pg -- removing from array\n",
3909 				rdev->bdev);
3910 			md_kick_rdev_from_array(rdev);
3911 		}
3912 
3913 	/* Cannot find a valid fresh disk */
3914 	if (!freshest) {
3915 		pr_warn("md: cannot find a valid disk\n");
3916 		return -EINVAL;
3917 	}
3918 
3919 	super_types[mddev->major_version].
3920 		validate_super(mddev, NULL/*freshest*/, freshest);
3921 
3922 	rdev_for_each_safe(rdev, tmp, mddev) {
3923 		if (mddev->max_disks &&
3924 		    rdev->desc_nr >= mddev->max_disks) {
3925 			pr_warn("md: %s: %pg: only %d devices permitted\n",
3926 				mdname(mddev), rdev->bdev,
3927 				mddev->max_disks);
3928 			md_kick_rdev_from_array(rdev);
3929 			continue;
3930 		}
3931 		if (rdev != freshest) {
3932 			if (super_types[mddev->major_version].
3933 			    validate_super(mddev, freshest, rdev)) {
3934 				pr_warn("md: kicking non-fresh %pg from array!\n",
3935 					rdev->bdev);
3936 				md_kick_rdev_from_array(rdev);
3937 				continue;
3938 			}
3939 		}
3940 		if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3941 		    !test_bit(Journal, &rdev->flags)) {
3942 			rdev->raid_disk = -1;
3943 			clear_bit(In_sync, &rdev->flags);
3944 		}
3945 	}
3946 
3947 	return 0;
3948 }
3949 
3950 /* Read a fixed-point number.
3951  * Numbers in sysfs attributes should be in "standard" units where
3952  * possible, so time should be in seconds.
3953  * However we internally use a much smaller unit such as
3954  * milliseconds or jiffies.
3955  * This function takes a decimal number with a possible fractional
3956  * component, and produces an integer which is the result of
3957  * multiplying that number by 10^'scale'.
3958  * all without any floating-point arithmetic.
3959  */
3960 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3961 {
3962 	unsigned long result = 0;
3963 	long decimals = -1;
3964 	while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3965 		if (*cp == '.')
3966 			decimals = 0;
3967 		else if (decimals < scale) {
3968 			unsigned int value;
3969 			value = *cp - '0';
3970 			result = result * 10 + value;
3971 			if (decimals >= 0)
3972 				decimals++;
3973 		}
3974 		cp++;
3975 	}
3976 	if (*cp == '\n')
3977 		cp++;
3978 	if (*cp)
3979 		return -EINVAL;
3980 	if (decimals < 0)
3981 		decimals = 0;
3982 	*res = result * int_pow(10, scale - decimals);
3983 	return 0;
3984 }
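
/*
 * Worked examples (illustrative): with scale == 3 the result is the
 * input multiplied by 10^3, so a value in seconds becomes milliseconds.
 *
 *	unsigned long v;
 *	strict_strtoul_scaled("0.2", &v, 3);	// v = 200
 *	strict_strtoul_scaled("12.345", &v, 3);	// v = 12345
 *	strict_strtoul_scaled("7", &v, 3);	// v = 7000
 */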
3985 
3986 static ssize_t
3987 safe_delay_show(struct mddev *mddev, char *page)
3988 {
3989 	unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ;
3990 
3991 	return sprintf(page, "%u.%03u\n", msec/1000, msec%1000);
3992 }
3993 static ssize_t
3994 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3995 {
3996 	unsigned long msec;
3997 
3998 	if (mddev_is_clustered(mddev)) {
3999 		pr_warn("md: Safemode is disabled for clustered mode\n");
4000 		return -EINVAL;
4001 	}
4002 
4003 	if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ)
4004 		return -EINVAL;
4005 	if (msec == 0)
4006 		mddev->safemode_delay = 0;
4007 	else {
4008 		unsigned long old_delay = mddev->safemode_delay;
4009 		unsigned long new_delay = (msec*HZ)/1000;
4010 
4011 		if (new_delay == 0)
4012 			new_delay = 1;
4013 		mddev->safemode_delay = new_delay;
4014 		if (new_delay < old_delay || old_delay == 0)
4015 			mod_timer(&mddev->safemode_timer, jiffies+1);
4016 	}
4017 	return len;
4018 }
4019 static struct md_sysfs_entry md_safe_delay =
4020 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
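
/*
 * Usage sketch (illustrative; path hypothetical): the attribute takes
 * a decimal number of seconds, parsed by strict_strtoul_scaled() with
 * scale 3 and converted to jiffies, so "0.200" asks for a 200ms delay.
 *
 *	int fd = open("/sys/block/md0/md/safe_mode_delay", O_WRONLY);
 *	write(fd, "0.200", 5);
 *	close(fd);
 */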
4021 
4022 static ssize_t
4023 level_show(struct mddev *mddev, char *page)
4024 {
4025 	struct md_personality *p;
4026 	int ret;
4027 	spin_lock(&mddev->lock);
4028 	p = mddev->pers;
4029 	if (p)
4030 		ret = sprintf(page, "%s\n", p->head.name);
4031 	else if (mddev->clevel[0])
4032 		ret = sprintf(page, "%s\n", mddev->clevel);
4033 	else if (mddev->level != LEVEL_NONE)
4034 		ret = sprintf(page, "%d\n", mddev->level);
4035 	else
4036 		ret = 0;
4037 	spin_unlock(&mddev->lock);
4038 	return ret;
4039 }
4040 
4041 static ssize_t
4042 level_store(struct mddev *mddev, const char *buf, size_t len)
4043 {
4044 	char clevel[16];
4045 	ssize_t rv;
4046 	size_t slen = len;
4047 	struct md_personality *pers, *oldpers;
4048 	long level;
4049 	void *priv, *oldpriv;
4050 	struct md_rdev *rdev;
4051 
4052 	if (slen == 0 || slen >= sizeof(clevel))
4053 		return -EINVAL;
4054 
4055 	rv = mddev_suspend_and_lock(mddev);
4056 	if (rv)
4057 		return rv;
4058 
4059 	if (mddev->pers == NULL) {
4060 		memcpy(mddev->clevel, buf, slen);
4061 		if (mddev->clevel[slen-1] == '\n')
4062 			slen--;
4063 		mddev->clevel[slen] = 0;
4064 		mddev->level = LEVEL_NONE;
4065 		rv = len;
4066 		goto out_unlock;
4067 	}
4068 	rv = -EROFS;
4069 	if (!md_is_rdwr(mddev))
4070 		goto out_unlock;
4071 
4072 	/* request to change the personality.  Need to ensure:
4073 	 *  - array is not engaged in resync/recovery/reshape
4074 	 *  - old personality can be suspended
4075 	 *  - new personality can take over the array.
4076 	 */
4077 
4078 	rv = -EBUSY;
4079 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
4080 	    mddev->reshape_position != MaxSector ||
4081 	    mddev->sysfs_active)
4082 		goto out_unlock;
4083 
4084 	rv = -EINVAL;
4085 	if (!mddev->pers->quiesce) {
4086 		pr_warn("md: %s: %s does not support online personality change\n",
4087 			mdname(mddev), mddev->pers->head.name);
4088 		goto out_unlock;
4089 	}
4090 
4091 	/* Now find the new personality */
4092 	memcpy(clevel, buf, slen);
4093 	if (clevel[slen-1] == '\n')
4094 		slen--;
4095 	clevel[slen] = 0;
4096 	if (kstrtol(clevel, 10, &level))
4097 		level = LEVEL_NONE;
4098 
4099 	if (request_module("md-%s", clevel) != 0)
4100 		request_module("md-level-%s", clevel);
4101 	pers = get_pers(level, clevel);
4102 	if (!pers) {
4103 		rv = -EINVAL;
4104 		goto out_unlock;
4105 	}
4106 
4107 	if (pers == mddev->pers) {
4108 		/* Nothing to do! */
4109 		put_pers(pers);
4110 		rv = len;
4111 		goto out_unlock;
4112 	}
4113 	if (!pers->takeover) {
4114 		put_pers(pers);
4115 		pr_warn("md: %s: %s does not support personality takeover\n",
4116 			mdname(mddev), clevel);
4117 		rv = -EINVAL;
4118 		goto out_unlock;
4119 	}
4120 
4121 	rdev_for_each(rdev, mddev)
4122 		rdev->new_raid_disk = rdev->raid_disk;
4123 
4124 	/* ->takeover must set new_* and/or delta_disks
4125 	 * if it succeeds, and may set them when it fails.
4126 	 */
4127 	priv = pers->takeover(mddev);
4128 	if (IS_ERR(priv)) {
4129 		mddev->new_level = mddev->level;
4130 		mddev->new_layout = mddev->layout;
4131 		mddev->new_chunk_sectors = mddev->chunk_sectors;
4132 		mddev->raid_disks -= mddev->delta_disks;
4133 		mddev->delta_disks = 0;
4134 		mddev->reshape_backwards = 0;
4135 		put_pers(pers);
4136 		pr_warn("md: %s: %s would not accept array\n",
4137 			mdname(mddev), clevel);
4138 		rv = PTR_ERR(priv);
4139 		goto out_unlock;
4140 	}
4141 
4142 	/* Looks like we have a winner */
4143 	mddev_detach(mddev);
4144 
4145 	spin_lock(&mddev->lock);
4146 	oldpers = mddev->pers;
4147 	oldpriv = mddev->private;
4148 	mddev->pers = pers;
4149 	mddev->private = priv;
4150 	strscpy(mddev->clevel, pers->head.name, sizeof(mddev->clevel));
4151 	mddev->level = mddev->new_level;
4152 	mddev->layout = mddev->new_layout;
4153 	mddev->chunk_sectors = mddev->new_chunk_sectors;
4154 	mddev->delta_disks = 0;
4155 	mddev->reshape_backwards = 0;
4156 	mddev->degraded = 0;
4157 	spin_unlock(&mddev->lock);
4158 
4159 	if (oldpers->sync_request == NULL &&
4160 	    mddev->external) {
4161 		/* We are converting from a no-redundancy array
4162 		 * to a redundancy array and metadata is managed
4163 		 * externally so we need to be sure that writes
4164 		 * won't block due to a need to transition
4165 		 *      clean->dirty
4166 		 * until external management is started.
4167 		 */
4168 		mddev->in_sync = 0;
4169 		mddev->safemode_delay = 0;
4170 		mddev->safemode = 0;
4171 	}
4172 
4173 	oldpers->free(mddev, oldpriv);
4174 
4175 	if (oldpers->sync_request == NULL &&
4176 	    pers->sync_request != NULL) {
4177 		/* need to add the md_redundancy_group */
4178 		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4179 			pr_warn("md: cannot register extra attributes for %s\n",
4180 				mdname(mddev));
4181 		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
4182 		mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
4183 		mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
4184 	}
4185 	if (oldpers->sync_request != NULL &&
4186 	    pers->sync_request == NULL) {
4187 		/* need to remove the md_redundancy_group */
4188 		if (mddev->to_remove == NULL)
4189 			mddev->to_remove = &md_redundancy_group;
4190 	}
4191 
4192 	put_pers(oldpers);
4193 
4194 	rdev_for_each(rdev, mddev) {
4195 		if (rdev->raid_disk < 0)
4196 			continue;
4197 		if (rdev->new_raid_disk >= mddev->raid_disks)
4198 			rdev->new_raid_disk = -1;
4199 		if (rdev->new_raid_disk == rdev->raid_disk)
4200 			continue;
4201 		sysfs_unlink_rdev(mddev, rdev);
4202 	}
4203 	rdev_for_each(rdev, mddev) {
4204 		if (rdev->raid_disk < 0)
4205 			continue;
4206 		if (rdev->new_raid_disk == rdev->raid_disk)
4207 			continue;
4208 		rdev->raid_disk = rdev->new_raid_disk;
4209 		if (rdev->raid_disk < 0)
4210 			clear_bit(In_sync, &rdev->flags);
4211 		else {
4212 			if (sysfs_link_rdev(mddev, rdev))
4213 				pr_warn("md: cannot register rd%d for %s after level change\n",
4214 					rdev->raid_disk, mdname(mddev));
4215 		}
4216 	}
4217 
4218 	if (pers->sync_request == NULL) {
4219 		/* this is now an array without redundancy, so
4220 		 * it must always be in_sync
4221 		 */
4222 		mddev->in_sync = 1;
4223 		timer_delete_sync(&mddev->safemode_timer);
4224 	}
4225 	pers->run(mddev);
4226 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4227 	if (!mddev->thread)
4228 		md_update_sb(mddev, 1);
4229 	sysfs_notify_dirent_safe(mddev->sysfs_level);
4230 	md_new_event();
4231 	rv = len;
4232 out_unlock:
4233 	mddev_unlock_and_resume(mddev);
4234 	return rv;
4235 }
4236 
4237 static struct md_sysfs_entry md_level =
4238 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
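
/*
 * Usage sketch (illustrative; path and level hypothetical): an online
 * takeover is requested by writing the new personality name.  The
 * write fails with -EBUSY while resync/recovery/reshape is running and
 * with -EINVAL if the target personality has no ->takeover method.
 *
 *	int fd = open("/sys/block/md0/md/level", O_WRONLY);
 *	write(fd, "raid6", 5);
 *	close(fd);
 */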
4239 
4240 static ssize_t
4241 new_level_show(struct mddev *mddev, char *page)
4242 {
4243 	return sprintf(page, "%d\n", mddev->new_level);
4244 }
4245 
4246 static ssize_t
4247 new_level_store(struct mddev *mddev, const char *buf, size_t len)
4248 {
4249 	unsigned int n;
4250 	int err;
4251 
4252 	err = kstrtouint(buf, 10, &n);
4253 	if (err < 0)
4254 		return err;
4255 	err = mddev_lock(mddev);
4256 	if (err)
4257 		return err;
4258 
4259 	mddev->new_level = n;
4260 	md_update_sb(mddev, 1);
4261 
4262 	mddev_unlock(mddev);
4263 	return len;
4264 }
4265 static struct md_sysfs_entry md_new_level =
4266 __ATTR(new_level, 0664, new_level_show, new_level_store);
4267 
4268 static ssize_t
4269 bitmap_type_show(struct mddev *mddev, char *page)
4270 {
4271 	struct md_submodule_head *head;
4272 	unsigned long i;
4273 	ssize_t len = 0;
4274 
4275 	if (mddev->bitmap_id == ID_BITMAP_NONE)
4276 		len += sprintf(page + len, "[none] ");
4277 	else
4278 		len += sprintf(page + len, "none ");
4279 
4280 	xa_lock(&md_submodule);
4281 	xa_for_each(&md_submodule, i, head) {
4282 		if (head->type != MD_BITMAP)
4283 			continue;
4284 
4285 		if (mddev->bitmap_id == head->id)
4286 			len += sprintf(page + len, "[%s] ", head->name);
4287 		else
4288 			len += sprintf(page + len, "%s ", head->name);
4289 	}
4290 	xa_unlock(&md_submodule);
4291 
4292 	len += sprintf(page + len, "\n");
4293 	return len;
4294 }
4295 
4296 static ssize_t
4297 bitmap_type_store(struct mddev *mddev, const char *buf, size_t len)
4298 {
4299 	struct md_submodule_head *head;
4300 	enum md_submodule_id id;
4301 	unsigned long i;
4302 	int err = 0;
4303 
4304 	xa_lock(&md_submodule);
4305 
4306 	if (mddev->bitmap_ops) {
4307 		err = -EBUSY;
4308 		goto out;
4309 	}
4310 
4311 	if (cmd_match(buf, "none")) {
4312 		mddev->bitmap_id = ID_BITMAP_NONE;
4313 		goto out;
4314 	}
4315 
4316 	xa_for_each(&md_submodule, i, head) {
4317 		if (head->type == MD_BITMAP && cmd_match(buf, head->name)) {
4318 			mddev->bitmap_id = head->id;
4319 			goto out;
4320 		}
4321 	}
4322 
4323 	err = kstrtoint(buf, 10, &id);
4324 	if (err)
4325 		goto out;
4326 
4327 	if (id == ID_BITMAP_NONE) {
4328 		mddev->bitmap_id = id;
4329 		goto out;
4330 	}
4331 
4332 	head = xa_load(&md_submodule, id);
4333 	if (head && head->type == MD_BITMAP) {
4334 		mddev->bitmap_id = id;
4335 		goto out;
4336 	}
4337 
4338 	err = -ENOENT;
4339 
4340 out:
4341 	xa_unlock(&md_submodule);
4342 	return err ? err : len;
4343 }
4344 
4345 static struct md_sysfs_entry md_bitmap_type =
4346 __ATTR(bitmap_type, 0664, bitmap_type_show, bitmap_type_store);
4347 
4348 static ssize_t
4349 layout_show(struct mddev *mddev, char *page)
4350 {
4351 	/* just a number, not meaningful for all levels */
4352 	if (mddev->reshape_position != MaxSector &&
4353 	    mddev->layout != mddev->new_layout)
4354 		return sprintf(page, "%d (%d)\n",
4355 			       mddev->new_layout, mddev->layout);
4356 	return sprintf(page, "%d\n", mddev->layout);
4357 }
4358 
4359 static ssize_t
4360 layout_store(struct mddev *mddev, const char *buf, size_t len)
4361 {
4362 	unsigned int n;
4363 	int err;
4364 
4365 	err = kstrtouint(buf, 10, &n);
4366 	if (err < 0)
4367 		return err;
4368 	err = mddev_lock(mddev);
4369 	if (err)
4370 		return err;
4371 
4372 	if (mddev->pers) {
4373 		if (mddev->pers->check_reshape == NULL)
4374 			err = -EBUSY;
4375 		else if (!md_is_rdwr(mddev))
4376 			err = -EROFS;
4377 		else {
4378 			mddev->new_layout = n;
4379 			err = mddev->pers->check_reshape(mddev);
4380 			if (err)
4381 				mddev->new_layout = mddev->layout;
4382 		}
4383 	} else {
4384 		mddev->new_layout = n;
4385 		if (mddev->reshape_position == MaxSector)
4386 			mddev->layout = n;
4387 	}
4388 	mddev_unlock(mddev);
4389 	return err ?: len;
4390 }
4391 static struct md_sysfs_entry md_layout =
4392 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
4393 
4394 static ssize_t
4395 raid_disks_show(struct mddev *mddev, char *page)
4396 {
4397 	if (mddev->raid_disks == 0)
4398 		return 0;
4399 	if (mddev->reshape_position != MaxSector &&
4400 	    mddev->delta_disks != 0)
4401 		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4402 			       mddev->raid_disks - mddev->delta_disks);
4403 	return sprintf(page, "%d\n", mddev->raid_disks);
4404 }
4405 
4406 static int update_raid_disks(struct mddev *mddev, int raid_disks);
4407 
4408 static ssize_t
4409 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
4410 {
4411 	unsigned int n;
4412 	int err;
4413 
4414 	err = kstrtouint(buf, 10, &n);
4415 	if (err < 0)
4416 		return err;
4417 
4418 	err = mddev_suspend_and_lock(mddev);
4419 	if (err)
4420 		return err;
4421 	if (mddev->pers)
4422 		err = update_raid_disks(mddev, n);
4423 	else if (mddev->reshape_position != MaxSector) {
4424 		struct md_rdev *rdev;
4425 		int olddisks = mddev->raid_disks - mddev->delta_disks;
4426 
4427 		err = -EINVAL;
4428 		rdev_for_each(rdev, mddev) {
4429 			if (olddisks < n &&
4430 			    rdev->data_offset < rdev->new_data_offset)
4431 				goto out_unlock;
4432 			if (olddisks > n &&
4433 			    rdev->data_offset > rdev->new_data_offset)
4434 				goto out_unlock;
4435 		}
4436 		err = 0;
4437 		mddev->delta_disks = n - olddisks;
4438 		mddev->raid_disks = n;
4439 		mddev->reshape_backwards = (mddev->delta_disks < 0);
4440 	} else
4441 		mddev->raid_disks = n;
4442 out_unlock:
4443 	mddev_unlock_and_resume(mddev);
4444 	return err ? err : len;
4445 }
4446 static struct md_sysfs_entry md_raid_disks =
4447 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
4448 
4449 static ssize_t
4450 uuid_show(struct mddev *mddev, char *page)
4451 {
4452 	return sprintf(page, "%pU\n", mddev->uuid);
4453 }
4454 static struct md_sysfs_entry md_uuid =
4455 __ATTR(uuid, S_IRUGO, uuid_show, NULL);
4456 
4457 static ssize_t
4458 chunk_size_show(struct mddev *mddev, char *page)
4459 {
4460 	if (mddev->reshape_position != MaxSector &&
4461 	    mddev->chunk_sectors != mddev->new_chunk_sectors)
4462 		return sprintf(page, "%d (%d)\n",
4463 			       mddev->new_chunk_sectors << 9,
4464 			       mddev->chunk_sectors << 9);
4465 	return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
4466 }
4467 
4468 static ssize_t
4469 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
4470 {
4471 	unsigned long n;
4472 	int err;
4473 
4474 	err = kstrtoul(buf, 10, &n);
4475 	if (err < 0)
4476 		return err;
4477 
4478 	err = mddev_lock(mddev);
4479 	if (err)
4480 		return err;
4481 	if (mddev->pers) {
4482 		if (mddev->pers->check_reshape == NULL)
4483 			err = -EBUSY;
4484 		else if (!md_is_rdwr(mddev))
4485 			err = -EROFS;
4486 		else {
4487 			mddev->new_chunk_sectors = n >> 9;
4488 			err = mddev->pers->check_reshape(mddev);
4489 			if (err)
4490 				mddev->new_chunk_sectors = mddev->chunk_sectors;
4491 		}
4492 	} else {
4493 		mddev->new_chunk_sectors = n >> 9;
4494 		if (mddev->reshape_position == MaxSector)
4495 			mddev->chunk_sectors = n >> 9;
4496 	}
4497 	mddev_unlock(mddev);
4498 	return err ?: len;
4499 }
4500 static struct md_sysfs_entry md_chunk_size =
4501 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
4502 
4503 static ssize_t
4504 resync_start_show(struct mddev *mddev, char *page)
4505 {
4506 	if (mddev->resync_offset == MaxSector)
4507 		return sprintf(page, "none\n");
4508 	return sprintf(page, "%llu\n", (unsigned long long)mddev->resync_offset);
4509 }
4510 
4511 static ssize_t
4512 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
4513 {
4514 	unsigned long long n;
4515 	int err;
4516 
4517 	if (cmd_match(buf, "none"))
4518 		n = MaxSector;
4519 	else {
4520 		err = kstrtoull(buf, 10, &n);
4521 		if (err < 0)
4522 			return err;
4523 		if (n != (sector_t)n)
4524 			return -EINVAL;
4525 	}
4526 
4527 	err = mddev_lock(mddev);
4528 	if (err)
4529 		return err;
4530 	if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
4531 		err = -EBUSY;
4532 
4533 	if (!err) {
4534 		mddev->resync_offset = n;
4535 		if (mddev->pers)
4536 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
4537 	}
4538 	mddev_unlock(mddev);
4539 	return err ?: len;
4540 }
4541 static struct md_sysfs_entry md_resync_start =
4542 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4543 		resync_start_show, resync_start_store);
4544 
4545 /*
4546  * The array state can be:
4547  *
4548  * clear
4549  *     No devices, no size, no level
4550  *     Equivalent to STOP_ARRAY ioctl
4551  * inactive
4552  *     May have some settings, but array is not active
4553  *        all IO results in error
4554  *     When written, doesn't tear down array, but just stops it
4555  * suspended (not supported yet)
4556  *     All IO requests will block. The array can be reconfigured.
4557  *     Writing this, if accepted, will block until array is quiescent
4558  * readonly
4559  *     no resync can happen.  no superblocks get written.
4560  *     write requests fail
4561  * read-auto
4562  *     like readonly, but behaves like 'clean' on a write request.
4563  *
4564  * clean - no pending writes, but otherwise active.
4565  *     When written to inactive array, starts without resync
4566  *     If a write request arrives then
4567  *       if metadata is known, mark 'dirty' and switch to 'active'.
4568  *       if not known, block and switch to write-pending
4569  *     If written to an active array that has pending writes, then fails.
4570  * active
4571  *     fully active: IO and resync can be happening.
4572  *     When written to inactive array, starts with resync
4573  *
4574  * write-pending
4575  *     clean, but writes are blocked waiting for 'active' to be written.
4576  *
4577  * active-idle
4578  *     like active, but no writes have been seen for a while (100msec).
4579  *
4580  * broken
4581  *     Array is failed. It's useful because mounted arrays aren't stopped
4582  *     when the array fails, so this state at least alerts the user that
4583  *     something is wrong.
4584  */
4585 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
4586 		   write_pending, active_idle, broken, bad_word};
4587 static char *array_states[] = {
4588 	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
4589 	"write-pending", "active-idle", "broken", NULL };
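
/*
 * Usage sketch (illustrative; path hypothetical): an array can be
 * stopped by walking it through the states above, mirroring the
 * STOP_ARRAY ioctl.
 *
 *	int fd = open("/sys/block/md0/md/array_state", O_WRONLY);
 *	write(fd, "inactive", 8);	// stop IO but keep settings
 *	write(fd, "clear", 5);		// tear the array down completely
 *	close(fd);
 */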
4590 
4591 static int match_word(const char *word, char **list)
4592 {
4593 	int n;
4594 	for (n=0; list[n]; n++)
4595 		if (cmd_match(word, list[n]))
4596 			break;
4597 	return n;
4598 }
4599 
4600 static ssize_t
4601 array_state_show(struct mddev *mddev, char *page)
4602 {
4603 	enum array_state st = inactive;
4604 
4605 	if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
4606 		switch(mddev->ro) {
4607 		case MD_RDONLY:
4608 			st = readonly;
4609 			break;
4610 		case MD_AUTO_READ:
4611 			st = read_auto;
4612 			break;
4613 		case MD_RDWR:
4614 			spin_lock(&mddev->lock);
4615 			if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
4616 				st = write_pending;
4617 			else if (mddev->in_sync)
4618 				st = clean;
4619 			else if (mddev->safemode)
4620 				st = active_idle;
4621 			else
4622 				st = active;
4623 			spin_unlock(&mddev->lock);
4624 		}
4625 
4626 		if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4627 			st = broken;
4628 	} else {
4629 		if (list_empty(&mddev->disks) &&
4630 		    mddev->raid_disks == 0 &&
4631 		    mddev->dev_sectors == 0)
4632 			st = clear;
4633 		else
4634 			st = inactive;
4635 	}
4636 	return sprintf(page, "%s\n", array_states[st]);
4637 }
4638 
4639 static int do_md_stop(struct mddev *mddev, int ro);
4640 static int md_set_readonly(struct mddev *mddev);
4641 static int restart_array(struct mddev *mddev);
4642 
4643 static ssize_t
4644 array_state_store(struct mddev *mddev, const char *buf, size_t len)
4645 {
4646 	int err = 0;
4647 	enum array_state st = match_word(buf, array_states);
4648 
4649 	/* No lock dependent actions */
4650 	switch (st) {
4651 	case suspended:		/* not supported yet */
4652 	case write_pending:	/* cannot be set */
4653 	case active_idle:	/* cannot be set */
4654 	case broken:		/* cannot be set */
4655 	case bad_word:
4656 		return -EINVAL;
4657 	case clear:
4658 	case readonly:
4659 	case inactive:
4660 	case read_auto:
4661 		if (!mddev->pers || !md_is_rdwr(mddev))
4662 			break;
4663 		/* write sysfs will not open mddev and opener should be 0 */
4664 		err = mddev_set_closing_and_sync_blockdev(mddev, 0);
4665 		if (err)
4666 			return err;
4667 		break;
4668 	default:
4669 		break;
4670 	}
4671 
4672 	if (mddev->pers && (st == active || st == clean) &&
4673 	    mddev->ro != MD_RDONLY) {
4674 		/* don't take reconfig_mutex when toggling between
4675 		 * clean and active
4676 		 */
4677 		spin_lock(&mddev->lock);
4678 		if (st == active) {
4679 			restart_array(mddev);
4680 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4681 			md_wakeup_thread(mddev->thread);
4682 			wake_up(&mddev->sb_wait);
4683 		} else /* st == clean */ {
4684 			restart_array(mddev);
4685 			if (!set_in_sync(mddev))
4686 				err = -EBUSY;
4687 		}
4688 		if (!err)
4689 			sysfs_notify_dirent_safe(mddev->sysfs_state);
4690 		spin_unlock(&mddev->lock);
4691 		return err ?: len;
4692 	}
4693 	err = mddev_lock(mddev);
4694 	if (err)
4695 		return err;
4696 
4697 	switch (st) {
4698 	case inactive:
4699 		/* stop an active array, return 0 otherwise */
4700 		if (mddev->pers)
4701 			err = do_md_stop(mddev, 2);
4702 		break;
4703 	case clear:
4704 		err = do_md_stop(mddev, 0);
4705 		break;
4706 	case readonly:
4707 		if (mddev->pers)
4708 			err = md_set_readonly(mddev);
4709 		else {
4710 			mddev->ro = MD_RDONLY;
4711 			set_disk_ro(mddev->gendisk, 1);
4712 			err = do_md_run(mddev);
4713 		}
4714 		break;
4715 	case read_auto:
4716 		if (mddev->pers) {
4717 			if (md_is_rdwr(mddev))
4718 				err = md_set_readonly(mddev);
4719 			else if (mddev->ro == MD_RDONLY)
4720 				err = restart_array(mddev);
4721 			if (err == 0) {
4722 				mddev->ro = MD_AUTO_READ;
4723 				set_disk_ro(mddev->gendisk, 0);
4724 			}
4725 		} else {
4726 			mddev->ro = MD_AUTO_READ;
4727 			err = do_md_run(mddev);
4728 		}
4729 		break;
4730 	case clean:
4731 		if (mddev->pers) {
4732 			err = restart_array(mddev);
4733 			if (err)
4734 				break;
4735 			spin_lock(&mddev->lock);
4736 			if (!set_in_sync(mddev))
4737 				err = -EBUSY;
4738 			spin_unlock(&mddev->lock);
4739 		} else
4740 			err = -EINVAL;
4741 		break;
4742 	case active:
4743 		if (mddev->pers) {
4744 			err = restart_array(mddev);
4745 			if (err)
4746 				break;
4747 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4748 			wake_up(&mddev->sb_wait);
4749 			err = 0;
4750 		} else {
4751 			mddev->ro = MD_RDWR;
4752 			set_disk_ro(mddev->gendisk, 0);
4753 			err = do_md_run(mddev);
4754 		}
4755 		break;
4756 	default:
4757 		err = -EINVAL;
4758 		break;
4759 	}
4760 
4761 	if (!err) {
4762 		if (mddev->hold_active == UNTIL_IOCTL)
4763 			mddev->hold_active = 0;
4764 		sysfs_notify_dirent_safe(mddev->sysfs_state);
4765 	}
4766 	mddev_unlock(mddev);
4767 
4768 	if (st == readonly || st == read_auto || st == inactive ||
4769 	    (err && st == clear))
4770 		clear_bit(MD_CLOSING, &mddev->flags);
4771 
4772 	return err ?: len;
4773 }
4774 static struct md_sysfs_entry md_array_state =
4775 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
4776 
4777 static ssize_t
4778 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
4779 	return sprintf(page, "%d\n",
4780 		       atomic_read(&mddev->max_corr_read_errors));
4781 }
4782 
4783 static ssize_t
4784 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4785 {
4786 	unsigned int n;
4787 	int rv;
4788 
4789 	rv = kstrtouint(buf, 10, &n);
4790 	if (rv < 0)
4791 		return rv;
4792 	if (n > INT_MAX)
4793 		return -EINVAL;
4794 	atomic_set(&mddev->max_corr_read_errors, n);
4795 	return len;
4796 }
4797 
4798 static struct md_sysfs_entry max_corr_read_errors =
4799 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4800 	max_corrected_read_errors_store);
4801 
4802 static ssize_t
4803 null_show(struct mddev *mddev, char *page)
4804 {
4805 	return -EINVAL;
4806 }
4807 
4808 static ssize_t
4809 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4810 {
4811 	/* buf must be %d:%d\n? giving major and minor numbers */
4812 	/* The new device is added to the array.
4813 	 * If the array has a persistent superblock, we read the
4814 	 * superblock to initialise info and check validity.
4815 	 * Otherwise, only checking done is that in bind_rdev_to_array,
4816 	 * which mainly checks size.
4817 	 */
4818 	char *e;
4819 	int major = simple_strtoul(buf, &e, 10);
4820 	int minor;
4821 	dev_t dev;
4822 	struct md_rdev *rdev;
4823 	int err;
4824 
4825 	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4826 		return -EINVAL;
4827 	minor = simple_strtoul(e+1, &e, 10);
4828 	if (*e && *e != '\n')
4829 		return -EINVAL;
4830 	dev = MKDEV(major, minor);
4831 	if (major != MAJOR(dev) ||
4832 	    minor != MINOR(dev))
4833 		return -EOVERFLOW;
4834 
4835 	err = mddev_suspend_and_lock(mddev);
4836 	if (err)
4837 		return err;
4838 	if (mddev->persistent) {
4839 		rdev = md_import_device(dev, mddev->major_version,
4840 					mddev->minor_version);
4841 		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4842 			struct md_rdev *rdev0
4843 				= list_entry(mddev->disks.next,
4844 					     struct md_rdev, same_set);
4845 			err = super_types[mddev->major_version]
4846 				.load_super(rdev, rdev0, mddev->minor_version);
4847 			if (err < 0)
4848 				goto out;
4849 		}
4850 	} else if (mddev->external)
4851 		rdev = md_import_device(dev, -2, -1);
4852 	else
4853 		rdev = md_import_device(dev, -1, -1);
4854 
4855 	if (IS_ERR(rdev)) {
4856 		mddev_unlock_and_resume(mddev);
4857 		return PTR_ERR(rdev);
4858 	}
4859 	err = bind_rdev_to_array(rdev, mddev);
4860  out:
4861 	if (err)
4862 		export_rdev(rdev);
4863 	mddev_unlock_and_resume(mddev);
4864 	if (!err)
4865 		md_new_event();
4866 	return err ? err : len;
4867 }
4868 
4869 static struct md_sysfs_entry md_new_device =
4870 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
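
/*
 * Usage sketch (illustrative; numbers hypothetical): the new device is
 * named by "major:minor", e.g. "8:16" for /dev/sdb.
 *
 *	int fd = open("/sys/block/md0/md/new_dev", O_WRONLY);
 *	write(fd, "8:16", 4);	// import /dev/sdb and bind it to the array
 *	close(fd);
 */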
4871 
4872 static ssize_t
4873 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4874 {
4875 	char *end;
4876 	unsigned long chunk, end_chunk;
4877 	int err;
4878 
4879 	if (!md_bitmap_enabled(mddev, false))
4880 		return len;
4881 
4882 	err = mddev_lock(mddev);
4883 	if (err)
4884 		return err;
4885 	if (!mddev->bitmap)
4886 		goto out;
4887 	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4888 	while (*buf) {
4889 		chunk = end_chunk = simple_strtoul(buf, &end, 0);
4890 		if (buf == end)
4891 			break;
4892 
4893 		if (*end == '-') { /* range */
4894 			buf = end + 1;
4895 			end_chunk = simple_strtoul(buf, &end, 0);
4896 			if (buf == end)
4897 				break;
4898 		}
4899 
4900 		if (*end && !isspace(*end))
4901 			break;
4902 
4903 		mddev->bitmap_ops->dirty_bits(mddev, chunk, end_chunk);
4904 		buf = skip_spaces(end);
4905 	}
4906 	mddev->bitmap_ops->unplug(mddev, true); /* flush the bits to disk */
4907 out:
4908 	mddev_unlock(mddev);
4909 	return len;
4910 }
4911 
4912 static struct md_sysfs_entry md_bitmap =
4913 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
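
/*
 * Usage sketch (illustrative): chunks are given singly or as ranges,
 * separated by whitespace.
 *
 *	int fd = open("/sys/block/md0/md/bitmap_set_bits", O_WRONLY);
 *	write(fd, "10 20-25", 8);	// dirty chunk 10 and chunks 20..25
 *	close(fd);
 */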
4914 
4915 static ssize_t
4916 size_show(struct mddev *mddev, char *page)
4917 {
4918 	return sprintf(page, "%llu\n",
4919 		(unsigned long long)mddev->dev_sectors / 2);
4920 }
4921 
4922 static int update_size(struct mddev *mddev, sector_t num_sectors);
4923 
4924 static ssize_t
4925 size_store(struct mddev *mddev, const char *buf, size_t len)
4926 {
4927 	/* If array is inactive, we can reduce the component size, but
4928 	 * not increase it (except from 0).
4929 	 * If array is active, we can try an on-line resize
4930 	 */
4931 	sector_t sectors;
4932 	int err = strict_blocks_to_sectors(buf, &sectors);
4933 
4934 	if (err < 0)
4935 		return err;
4936 	err = mddev_lock(mddev);
4937 	if (err)
4938 		return err;
4939 	if (mddev->pers) {
4940 		err = update_size(mddev, sectors);
4941 		if (err == 0)
4942 			md_update_sb(mddev, 1);
4943 	} else {
4944 		if (mddev->dev_sectors == 0 ||
4945 		    mddev->dev_sectors > sectors)
4946 			mddev->dev_sectors = sectors;
4947 		else
4948 			err = -ENOSPC;
4949 	}
4950 	mddev_unlock(mddev);
4951 	return err ? err : len;
4952 }
4953 
4954 static struct md_sysfs_entry md_size =
4955 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
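
/*
 * Usage sketch (illustrative): the value is in 1KiB blocks, converted
 * by strict_blocks_to_sectors() above.
 *
 *	int fd = open("/sys/block/md0/md/component_size", O_WRONLY);
 *	write(fd, "1048576", 7);	// use 1GiB of each component device
 *	close(fd);
 */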
4956 
4957 /* Metadata version.
4958  * This is one of
4959  *   'none' for arrays with no metadata (good luck...)
4960  *   'external' for arrays with externally managed metadata,
4961  * or N.M for internally known formats
4962  */
4963 static ssize_t
4964 metadata_show(struct mddev *mddev, char *page)
4965 {
4966 	if (mddev->persistent)
4967 		return sprintf(page, "%d.%d\n",
4968 			       mddev->major_version, mddev->minor_version);
4969 	else if (mddev->external)
4970 		return sprintf(page, "external:%s\n", mddev->metadata_type);
4971 	else
4972 		return sprintf(page, "none\n");
4973 }
4974 
4975 static ssize_t
4976 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4977 {
4978 	int major, minor;
4979 	char *e;
4980 	int err;
4981 	/* Changing the details of 'external' metadata is
4982 	 * always permitted.  Otherwise there must be
4983 	 * no devices attached to the array.
4984 	 */
4985 
4986 	err = mddev_lock(mddev);
4987 	if (err)
4988 		return err;
4989 	err = -EBUSY;
4990 	if (mddev->external && strncmp(buf, "external:", 9) == 0)
4991 		;
4992 	else if (!list_empty(&mddev->disks))
4993 		goto out_unlock;
4994 
4995 	err = 0;
4996 	if (cmd_match(buf, "none")) {
4997 		mddev->persistent = 0;
4998 		mddev->external = 0;
4999 		mddev->major_version = 0;
5000 		mddev->minor_version = 90;
5001 		goto out_unlock;
5002 	}
5003 	if (strncmp(buf, "external:", 9) == 0) {
5004 		size_t namelen = len-9;
5005 		if (namelen >= sizeof(mddev->metadata_type))
5006 			namelen = sizeof(mddev->metadata_type)-1;
5007 		memcpy(mddev->metadata_type, buf+9, namelen);
5008 		mddev->metadata_type[namelen] = 0;
5009 		if (namelen && mddev->metadata_type[namelen-1] == '\n')
5010 			mddev->metadata_type[--namelen] = 0;
5011 		mddev->persistent = 0;
5012 		mddev->external = 1;
5013 		mddev->major_version = 0;
5014 		mddev->minor_version = 90;
5015 		goto out_unlock;
5016 	}
5017 	major = simple_strtoul(buf, &e, 10);
5018 	err = -EINVAL;
5019 	if (e==buf || *e != '.')
5020 		goto out_unlock;
5021 	buf = e+1;
5022 	minor = simple_strtoul(buf, &e, 10);
5023 	if (e==buf || (*e && *e != '\n') )
5024 		goto out_unlock;
5025 	err = -ENOENT;
5026 	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
5027 		goto out_unlock;
5028 	mddev->major_version = major;
5029 	mddev->minor_version = minor;
5030 	mddev->persistent = 1;
5031 	mddev->external = 0;
5032 	err = 0;
5033 out_unlock:
5034 	mddev_unlock(mddev);
5035 	return err ?: len;
5036 }
5037 
5038 static struct md_sysfs_entry md_metadata =
5039 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
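
/*
 * Usage sketch (illustrative): accepted forms are "none",
 * "external:<name>", or "<major>.<minor>" for an in-kernel format.
 *
 *	int fd = open("/sys/block/md0/md/metadata_version", O_WRONLY);
 *	write(fd, "1.2", 3);	// select the v1.2 superblock format
 *	close(fd);
 */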
5040 
5041 static bool rdev_needs_recovery(struct md_rdev *rdev, sector_t sectors)
5042 {
5043 	return rdev->raid_disk >= 0 &&
5044 	       !test_bit(Journal, &rdev->flags) &&
5045 	       !test_bit(Faulty, &rdev->flags) &&
5046 	       !test_bit(In_sync, &rdev->flags) &&
5047 	       rdev->recovery_offset < sectors;
5048 }
5049 
5050 static enum sync_action md_get_active_sync_action(struct mddev *mddev)
5051 {
5052 	struct md_rdev *rdev;
5053 	bool is_recover = false;
5054 
5055 	if (mddev->resync_offset < MaxSector)
5056 		return ACTION_RESYNC;
5057 
5058 	if (mddev->reshape_position != MaxSector)
5059 		return ACTION_RESHAPE;
5060 
5061 	rcu_read_lock();
5062 	rdev_for_each_rcu(rdev, mddev) {
5063 		if (rdev_needs_recovery(rdev, MaxSector)) {
5064 			is_recover = true;
5065 			break;
5066 		}
5067 	}
5068 	rcu_read_unlock();
5069 
5070 	return is_recover ? ACTION_RECOVER : ACTION_IDLE;
5071 }
5072 
5073 enum sync_action md_sync_action(struct mddev *mddev)
5074 {
5075 	unsigned long recovery = mddev->recovery;
5076 	enum sync_action active_action;
5077 
5078 	/*
5079 	 * frozen has the highest priority, means running sync_thread will be
5080 	 * stopped immediately, and no new sync_thread can start.
5081 	 */
5082 	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
5083 		return ACTION_FROZEN;
5084 
5085 	/*
5086 	 * read-only array can't register sync_thread, and it can only
5087 	 * add/remove spares.
5088 	 */
5089 	if (!md_is_rdwr(mddev))
5090 		return ACTION_IDLE;
5091 
5092 	/*
5093 	 * idle means no sync_thread is running, and no new sync_thread is
5094 	 * requested.
5095 	 */
5096 	if (!test_bit(MD_RECOVERY_RUNNING, &recovery) &&
5097 	    !test_bit(MD_RECOVERY_NEEDED, &recovery))
5098 		return ACTION_IDLE;
5099 
5100 	/*
5101 	 * Check if any sync operation (resync/recover/reshape) is
5102 	 * currently active. This ensures that only one sync operation
5103 	 * can run at a time. Returns the type of active operation, or
5104 	 * ACTION_IDLE if none are active.
5105 	 */
5106 	active_action = md_get_active_sync_action(mddev);
5107 	if (active_action != ACTION_IDLE)
5108 		return active_action;
5109 
5110 	if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
5111 		return ACTION_RESHAPE;
5112 
5113 	if (test_bit(MD_RECOVERY_RECOVER, &recovery))
5114 		return ACTION_RECOVER;
5115 
5116 	if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
5117 		/*
5118 		 * MD_RECOVERY_CHECK must be paired with
5119 		 * MD_RECOVERY_REQUESTED.
5120 		 */
5121 		if (test_bit(MD_RECOVERY_CHECK, &recovery))
5122 			return ACTION_CHECK;
5123 		if (test_bit(MD_RECOVERY_REQUESTED, &recovery))
5124 			return ACTION_REPAIR;
5125 		return ACTION_RESYNC;
5126 	}
5127 
5128 	/*
5129 	 * MD_RECOVERY_NEEDED or MD_RECOVERY_RUNNING is set, however, no
5130 	 * sync_action is specified.
5131 	 */
5132 	return ACTION_IDLE;
5133 }
5134 
5135 enum sync_action md_sync_action_by_name(const char *page)
5136 {
5137 	enum sync_action action;
5138 
5139 	for (action = 0; action < NR_SYNC_ACTIONS; ++action) {
5140 		if (cmd_match(page, action_name[action]))
5141 			return action;
5142 	}
5143 
5144 	return NR_SYNC_ACTIONS;
5145 }
5146 
5147 const char *md_sync_action_name(enum sync_action action)
5148 {
5149 	return action_name[action];
5150 }
5151 
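/*
 * Example (illustrative): the two lookup helpers above round-trip action
 * names. cmd_match() tolerates the trailing newline that a sysfs write
 * carries, so:
 *
 *	enum sync_action a = md_sync_action_by_name("check\n");
 *
 * yields ACTION_CHECK, and md_sync_action_name(a) returns "check". An
 * unknown string maps to NR_SYNC_ACTIONS, which callers must treat as
 * invalid before indexing action_name[].
 */
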
5152 static ssize_t
5153 action_show(struct mddev *mddev, char *page)
5154 {
5155 	enum sync_action action = md_sync_action(mddev);
5156 
5157 	return sprintf(page, "%s\n", md_sync_action_name(action));
5158 }
5159 
5160 /**
5161  * stop_sync_thread() - wait for sync_thread to stop if it's running.
5162  * @mddev:	the array.
5163  * @locked:	if set, reconfig_mutex will still be held after this function
5164  *		returns; if not set, reconfig_mutex will be released after this
5165  *		function returns.
5166  */
5167 static void stop_sync_thread(struct mddev *mddev, bool locked)
5168 {
5169 	int sync_seq = atomic_read(&mddev->sync_seq);
5170 
5171 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5172 		if (!locked)
5173 			mddev_unlock(mddev);
5174 		return;
5175 	}
5176 
5177 	mddev_unlock(mddev);
5178 
5179 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5180 	/*
5181 	 * Thread might be blocked waiting for metadata update which will now
5182 	 * never happen
5183 	 */
5184 	md_wakeup_thread_directly(&mddev->sync_thread);
5185 	if (work_pending(&mddev->sync_work))
5186 		flush_work(&mddev->sync_work);
5187 
5188 	wait_event(resync_wait,
5189 		   !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
5190 		   (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery) &&
5191 		    sync_seq != atomic_read(&mddev->sync_seq)));
5192 
5193 	if (locked)
5194 		mddev_lock_nointr(mddev);
5195 }
5196 
5197 void md_idle_sync_thread(struct mddev *mddev)
5198 {
5199 	lockdep_assert_held(&mddev->reconfig_mutex);
5200 
5201 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5202 	stop_sync_thread(mddev, true);
5203 }
5204 EXPORT_SYMBOL_GPL(md_idle_sync_thread);
5205 
5206 void md_frozen_sync_thread(struct mddev *mddev)
5207 {
5208 	lockdep_assert_held(&mddev->reconfig_mutex);
5209 
5210 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5211 	stop_sync_thread(mddev, true);
5212 }
5213 EXPORT_SYMBOL_GPL(md_frozen_sync_thread);
5214 
5215 void md_unfrozen_sync_thread(struct mddev *mddev)
5216 {
5217 	lockdep_assert_held(&mddev->reconfig_mutex);
5218 
5219 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5220 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5221 	md_wakeup_thread(mddev->thread);
5222 	sysfs_notify_dirent_safe(mddev->sysfs_action);
5223 }
5224 EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread);
5225 
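/*
 * Illustrative usage sketch (assumed caller, in the spirit of dm-raid's
 * suspend path): the freeze/unfreeze pair above brackets an operation that
 * must not race with a running sync_thread:
 *
 *	mddev_lock_nointr(mddev);
 *	md_frozen_sync_thread(mddev);	// park any running sync_thread
 *	... reconfigure the array ...
 *	md_unfrozen_sync_thread(mddev);	// let md_check_recovery restart it
 *	mddev_unlock(mddev);
 */
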
5226 static int mddev_start_reshape(struct mddev *mddev)
5227 {
5228 	int ret;
5229 
5230 	if (mddev->pers->start_reshape == NULL)
5231 		return -EINVAL;
5232 
5233 	if (mddev->reshape_position == MaxSector ||
5234 	    mddev->pers->check_reshape == NULL ||
5235 	    mddev->pers->check_reshape(mddev)) {
5236 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5237 		ret = mddev->pers->start_reshape(mddev);
5238 		if (ret)
5239 			return ret;
5240 	} else {
5241 		/*
5242 		 * If reshape is still in progress, and md_check_recovery() can
5243 		 * continue to reshape, don't restart reshape because data can
5244 		 * be corrupted for raid456.
5245 		 */
5246 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5247 	}
5248 
5249 	sysfs_notify_dirent_safe(mddev->sysfs_degraded);
5250 	return 0;
5251 }
5252 
5253 static ssize_t
5254 action_store(struct mddev *mddev, const char *page, size_t len)
5255 {
5256 	int ret;
5257 	enum sync_action action;
5258 
5259 	if (!mddev->pers || !mddev->pers->sync_request)
5260 		return -EINVAL;
5261 
5262 retry:
5263 	if (work_busy(&mddev->sync_work))
5264 		flush_work(&mddev->sync_work);
5265 
5266 	ret = mddev_lock(mddev);
5267 	if (ret)
5268 		return ret;
5269 
5270 	if (work_busy(&mddev->sync_work)) {
5271 		mddev_unlock(mddev);
5272 		goto retry;
5273 	}
5274 
5275 	action = md_sync_action_by_name(page);
5276 
5277 	/* TODO: mdadm relies on "idle" to start sync_thread. */
5278 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5279 		switch (action) {
5280 		case ACTION_FROZEN:
5281 			md_frozen_sync_thread(mddev);
5282 			ret = len;
5283 			goto out;
5284 		case ACTION_IDLE:
5285 			md_idle_sync_thread(mddev);
5286 			break;
5287 		case ACTION_RESHAPE:
5288 		case ACTION_RECOVER:
5289 		case ACTION_CHECK:
5290 		case ACTION_REPAIR:
5291 		case ACTION_RESYNC:
5292 			ret = -EBUSY;
5293 			goto out;
5294 		default:
5295 			ret = -EINVAL;
5296 			goto out;
5297 		}
5298 	} else {
5299 		switch (action) {
5300 		case ACTION_FROZEN:
5301 			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5302 			ret = len;
5303 			goto out;
5304 		case ACTION_RESHAPE:
5305 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5306 			ret = mddev_start_reshape(mddev);
5307 			if (ret)
5308 				goto out;
5309 			break;
5310 		case ACTION_RECOVER:
5311 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5312 			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5313 			break;
5314 		case ACTION_CHECK:
5315 			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5316 			fallthrough;
5317 		case ACTION_REPAIR:
5318 			set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
5319 			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5320 			fallthrough;
5321 		case ACTION_RESYNC:
5322 		case ACTION_IDLE:
5323 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5324 			break;
5325 		default:
5326 			ret = -EINVAL;
5327 			goto out;
5328 		}
5329 	}
5330 
5331 	if (mddev->ro == MD_AUTO_READ) {
5332 		/* A write to sync_action is enough to justify
5333 		 * canceling read-auto mode
5334 		 */
5335 		mddev->ro = MD_RDWR;
5336 		md_wakeup_thread(mddev->sync_thread);
5337 	}
5338 
5339 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5340 	md_wakeup_thread(mddev->thread);
5341 	sysfs_notify_dirent_safe(mddev->sysfs_action);
5342 	ret = len;
5343 
5344 out:
5345 	mddev_unlock(mddev);
5346 	return ret;
5347 }
5348 
5349 static struct md_sysfs_entry md_scan_mode =
5350 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
5351 
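/*
 * Example (illustrative): the attribute above accepts the names from
 * action_name[], written from userspace, e.g.:
 *
 *	echo check  > /sys/block/md0/md/sync_action	# start a scrub
 *	echo repair > /sys/block/md0/md/sync_action	# scrub and fix mismatches
 *	echo idle   > /sys/block/md0/md/sync_action	# interrupt a running sync
 *	echo frozen > /sys/block/md0/md/sync_action	# also block new sync threads
 *
 * "md0" is a placeholder device name.
 */
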
5352 static ssize_t
5353 last_sync_action_show(struct mddev *mddev, char *page)
5354 {
5355 	return sprintf(page, "%s\n",
5356 		       md_sync_action_name(mddev->last_sync_action));
5357 }
5358 
5359 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
5360 
5361 static ssize_t
5362 mismatch_cnt_show(struct mddev *mddev, char *page)
5363 {
5364 	return sprintf(page, "%llu\n",
5365 		       (unsigned long long)
5366 		       atomic64_read(&mddev->resync_mismatches));
5367 }
5368 
5369 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
5370 
5371 static ssize_t
5372 sync_min_show(struct mddev *mddev, char *page)
5373 {
5374 	return sprintf(page, "%d (%s)\n", speed_min(mddev),
5375 		       mddev->sync_speed_min ? "local" : "system");
5376 }
5377 
5378 static ssize_t
5379 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
5380 {
5381 	unsigned int min;
5382 	int rv;
5383 
5384 	if (strncmp(buf, "system", 6) == 0) {
5385 		min = 0;
5386 	} else {
5387 		rv = kstrtouint(buf, 10, &min);
5388 		if (rv < 0)
5389 			return rv;
5390 		if (min == 0)
5391 			return -EINVAL;
5392 	}
5393 	mddev->sync_speed_min = min;
5394 	return len;
5395 }
5396 
5397 static struct md_sysfs_entry md_sync_min =
5398 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
5399 
5400 static ssize_t
5401 sync_max_show(struct mddev *mddev, char *page)
5402 {
5403 	return sprintf(page, "%d (%s)\n", speed_max(mddev),
5404 		       mddev->sync_speed_max ? "local" : "system");
5405 }
5406 
5407 static ssize_t
5408 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
5409 {
5410 	unsigned int max;
5411 	int rv;
5412 
5413 	if (strncmp(buf, "system", 6) == 0) {
5414 		max = 0;
5415 	} else {
5416 		rv = kstrtouint(buf, 10, &max);
5417 		if (rv < 0)
5418 			return rv;
5419 		if (max == 0)
5420 			return -EINVAL;
5421 	}
5422 	mddev->sync_speed_max = max;
5423 	return len;
5424 }
5425 
5426 static struct md_sysfs_entry md_sync_max =
5427 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
5428 
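/*
 * Example (illustrative): both attributes take a KiB/s value or the
 * literal "system" to fall back to the global default:
 *
 *	echo 50000  > /sys/block/md0/md/sync_speed_min
 *	echo 200000 > /sys/block/md0/md/sync_speed_max
 *	echo system > /sys/block/md0/md/sync_speed_max
 *
 * Writing 0 is rejected with -EINVAL; "system" is the only way to clear
 * a local override.
 */
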
5429 static ssize_t
5430 sync_io_depth_show(struct mddev *mddev, char *page)
5431 {
5432 	return sprintf(page, "%d (%s)\n", sync_io_depth(mddev),
5433 		       mddev->sync_io_depth ? "local" : "system");
5434 }
5435 
5436 static ssize_t
5437 sync_io_depth_store(struct mddev *mddev, const char *buf, size_t len)
5438 {
5439 	unsigned int max;
5440 	int rv;
5441 
5442 	if (strncmp(buf, "system", 6) == 0) {
5443 		max = 0;
5444 	} else {
5445 		rv = kstrtouint(buf, 10, &max);
5446 		if (rv < 0)
5447 			return rv;
5448 		if (max == 0)
5449 			return -EINVAL;
5450 	}
5451 	mddev->sync_io_depth = max;
5452 	return len;
5453 }
5454 
5455 static struct md_sysfs_entry md_sync_io_depth =
5456 __ATTR_RW(sync_io_depth);
5457 
5458 static ssize_t
5459 degraded_show(struct mddev *mddev, char *page)
5460 {
5461 	return sprintf(page, "%d\n", mddev->degraded);
5462 }
5463 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
5464 
5465 static ssize_t
5466 sync_force_parallel_show(struct mddev *mddev, char *page)
5467 {
5468 	return sprintf(page, "%d\n", mddev->parallel_resync);
5469 }
5470 
5471 static ssize_t
5472 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
5473 {
5474 	long n;
5475 
5476 	if (kstrtol(buf, 10, &n))
5477 		return -EINVAL;
5478 
5479 	if (n != 0 && n != 1)
5480 		return -EINVAL;
5481 
5482 	mddev->parallel_resync = n;
5483 
5484 	if (mddev->sync_thread)
5485 		wake_up(&resync_wait);
5486 
5487 	return len;
5488 }
5489 
5490 /* force parallel resync, even with shared block devices */
5491 static struct md_sysfs_entry md_sync_force_parallel =
5492 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
5493        sync_force_parallel_show, sync_force_parallel_store);
5494 
5495 static ssize_t
5496 sync_speed_show(struct mddev *mddev, char *page)
5497 {
5498 	unsigned long resync, dt, db;
5499 	if (mddev->curr_resync == MD_RESYNC_NONE)
5500 		return sprintf(page, "none\n");
5501 	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
5502 	dt = (jiffies - mddev->resync_mark) / HZ;
5503 	if (!dt) dt++;
5504 	db = resync - mddev->resync_mark_cnt;
5505 	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
5506 }
5507 
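/*
 * Note on the arithmetic in sync_speed_show(): 'db' counts 512-byte
 * sectors completed since the last rate mark and 'dt' is in seconds, so
 * db/dt/2 converts sectors/s to KiB/s (2 sectors per KiB). E.g. 204800
 * sectors over 1 second is reported as 102400.
 */
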
5508 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
5509 
5510 static ssize_t
5511 sync_completed_show(struct mddev *mddev, char *page)
5512 {
5513 	unsigned long long max_sectors, resync;
5514 
5515 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5516 		return sprintf(page, "none\n");
5517 
5518 	if (mddev->curr_resync == MD_RESYNC_YIELDED ||
5519 	    mddev->curr_resync == MD_RESYNC_DELAYED)
5520 		return sprintf(page, "delayed\n");
5521 
5522 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
5523 	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5524 		max_sectors = mddev->resync_max_sectors;
5525 	else
5526 		max_sectors = mddev->dev_sectors;
5527 
5528 	resync = mddev->curr_resync_completed;
5529 	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
5530 }
5531 
5532 static struct md_sysfs_entry md_sync_completed =
5533 	__ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
5534 
5535 static ssize_t
5536 min_sync_show(struct mddev *mddev, char *page)
5537 {
5538 	return sprintf(page, "%llu\n",
5539 		       (unsigned long long)mddev->resync_min);
5540 }
5541 static ssize_t
5542 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
5543 {
5544 	unsigned long long min;
5545 	int err;
5546 
5547 	if (kstrtoull(buf, 10, &min))
5548 		return -EINVAL;
5549 
5550 	spin_lock(&mddev->lock);
5551 	err = -EINVAL;
5552 	if (min > mddev->resync_max)
5553 		goto out_unlock;
5554 
5555 	err = -EBUSY;
5556 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5557 		goto out_unlock;
5558 
5559 	/* Round down to multiple of 4K for safety */
5560 	mddev->resync_min = round_down(min, 8);
5561 	err = 0;
5562 
5563 out_unlock:
5564 	spin_unlock(&mddev->lock);
5565 	return err ?: len;
5566 }
5567 
5568 static struct md_sysfs_entry md_min_sync =
5569 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
5570 
5571 static ssize_t
5572 max_sync_show(struct mddev *mddev, char *page)
5573 {
5574 	if (mddev->resync_max == MaxSector)
5575 		return sprintf(page, "max\n");
5576 	else
5577 		return sprintf(page, "%llu\n",
5578 			       (unsigned long long)mddev->resync_max);
5579 }
5580 static ssize_t
5581 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
5582 {
5583 	int err;
5584 	spin_lock(&mddev->lock);
5585 	if (strncmp(buf, "max", 3) == 0)
5586 		mddev->resync_max = MaxSector;
5587 	else {
5588 		unsigned long long max;
5589 		int chunk;
5590 
5591 		err = -EINVAL;
5592 		if (kstrtoull(buf, 10, &max))
5593 			goto out_unlock;
5594 		if (max < mddev->resync_min)
5595 			goto out_unlock;
5596 
5597 		err = -EBUSY;
5598 		if (max < mddev->resync_max && md_is_rdwr(mddev) &&
5599 		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5600 			goto out_unlock;
5601 
5602 		/* Must be a multiple of chunk_size */
5603 		chunk = mddev->chunk_sectors;
5604 		if (chunk) {
5605 			sector_t temp = max;
5606 
5607 			err = -EINVAL;
5608 			if (sector_div(temp, chunk))
5609 				goto out_unlock;
5610 		}
5611 		mddev->resync_max = max;
5612 	}
5613 	wake_up(&mddev->recovery_wait);
5614 	err = 0;
5615 out_unlock:
5616 	spin_unlock(&mddev->lock);
5617 	return err ?: len;
5618 }
5619 
5620 static struct md_sysfs_entry md_max_sync =
5621 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
5622 
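/*
 * Example (illustrative): sync_min/sync_max bound the resync window in
 * 512-byte sectors. sync_max must be "max" or a multiple of the chunk
 * size; with a 512KiB chunk (1024 sectors) for instance:
 *
 *	echo 0       > /sys/block/md0/md/sync_min
 *	echo 2097152 > /sys/block/md0/md/sync_max	# 2048 chunks, i.e. 1GiB
 *	echo max     > /sys/block/md0/md/sync_max
 */
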
5623 static ssize_t
5624 suspend_lo_show(struct mddev *mddev, char *page)
5625 {
5626 	return sprintf(page, "%llu\n",
5627 		       (unsigned long long)READ_ONCE(mddev->suspend_lo));
5628 }
5629 
5630 static ssize_t
5631 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
5632 {
5633 	unsigned long long new;
5634 	int err;
5635 
5636 	err = kstrtoull(buf, 10, &new);
5637 	if (err < 0)
5638 		return err;
5639 	if (new != (sector_t)new)
5640 		return -EINVAL;
5641 
5642 	err = mddev_suspend(mddev, true);
5643 	if (err)
5644 		return err;
5645 
5646 	WRITE_ONCE(mddev->suspend_lo, new);
5647 	mddev_resume(mddev);
5648 
5649 	return len;
5650 }
5651 static struct md_sysfs_entry md_suspend_lo =
5652 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5653 
5654 static ssize_t
5655 suspend_hi_show(struct mddev *mddev, char *page)
5656 {
5657 	return sprintf(page, "%llu\n",
5658 		       (unsigned long long)READ_ONCE(mddev->suspend_hi));
5659 }
5660 
5661 static ssize_t
5662 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
5663 {
5664 	unsigned long long new;
5665 	int err;
5666 
5667 	err = kstrtoull(buf, 10, &new);
5668 	if (err < 0)
5669 		return err;
5670 	if (new != (sector_t)new)
5671 		return -EINVAL;
5672 
5673 	err = mddev_suspend(mddev, true);
5674 	if (err)
5675 		return err;
5676 
5677 	WRITE_ONCE(mddev->suspend_hi, new);
5678 	mddev_resume(mddev);
5679 
5680 	return len;
5681 }
5682 static struct md_sysfs_entry md_suspend_hi =
5683 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
5684 
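/*
 * Example (illustrative): the two stores above quiesce the array while
 * updating the window, so userspace can fence I/O to a sector range:
 *
 *	echo 0      > /sys/block/md0/md/suspend_lo
 *	echo 262144 > /sys/block/md0/md/suspend_hi	# fence the first 128MiB
 */
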
5685 static ssize_t
5686 reshape_position_show(struct mddev *mddev, char *page)
5687 {
5688 	if (mddev->reshape_position != MaxSector)
5689 		return sprintf(page, "%llu\n",
5690 			       (unsigned long long)mddev->reshape_position);
5691 	strcpy(page, "none\n");
5692 	return 5;
5693 }
5694 
5695 static ssize_t
5696 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
5697 {
5698 	struct md_rdev *rdev;
5699 	unsigned long long new;
5700 	int err;
5701 
5702 	err = kstrtoull(buf, 10, &new);
5703 	if (err < 0)
5704 		return err;
5705 	if (new != (sector_t)new)
5706 		return -EINVAL;
5707 	err = mddev_lock(mddev);
5708 	if (err)
5709 		return err;
5710 	err = -EBUSY;
5711 	if (mddev->pers)
5712 		goto unlock;
5713 	mddev->reshape_position = new;
5714 	mddev->delta_disks = 0;
5715 	mddev->reshape_backwards = 0;
5716 	mddev->new_level = mddev->level;
5717 	mddev->new_layout = mddev->layout;
5718 	mddev->new_chunk_sectors = mddev->chunk_sectors;
5719 	rdev_for_each(rdev, mddev)
5720 		rdev->new_data_offset = rdev->data_offset;
5721 	err = 0;
5722 unlock:
5723 	mddev_unlock(mddev);
5724 	return err ?: len;
5725 }
5726 
5727 static struct md_sysfs_entry md_reshape_position =
5728 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5729        reshape_position_store);
5730 
5731 static ssize_t
5732 reshape_direction_show(struct mddev *mddev, char *page)
5733 {
5734 	return sprintf(page, "%s\n",
5735 		       mddev->reshape_backwards ? "backwards" : "forwards");
5736 }
5737 
5738 static ssize_t
5739 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5740 {
5741 	int backwards = 0;
5742 	int err;
5743 
5744 	if (cmd_match(buf, "forwards"))
5745 		backwards = 0;
5746 	else if (cmd_match(buf, "backwards"))
5747 		backwards = 1;
5748 	else
5749 		return -EINVAL;
5750 	if (mddev->reshape_backwards == backwards)
5751 		return len;
5752 
5753 	err = mddev_lock(mddev);
5754 	if (err)
5755 		return err;
5756 	/* check if we are allowed to change */
5757 	if (mddev->delta_disks)
5758 		err = -EBUSY;
5759 	else if (mddev->persistent &&
5760 	    mddev->major_version == 0)
5761 		err =  -EINVAL;
5762 	else
5763 		mddev->reshape_backwards = backwards;
5764 	mddev_unlock(mddev);
5765 	return err ?: len;
5766 }
5767 
5768 static struct md_sysfs_entry md_reshape_direction =
5769 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5770        reshape_direction_store);
5771 
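/*
 * Example (illustrative): reshape_position can only be seeded while the
 * array is not running (mddev->pers == NULL), and reshape_direction can
 * only change before delta_disks is set, e.g. before assembly:
 *
 *	echo 12345678  > /sys/block/md0/md/reshape_position
 *	echo backwards > /sys/block/md0/md/reshape_direction
 *
 * The position value here is an arbitrary placeholder sector count.
 */
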
5772 static ssize_t
5773 array_size_show(struct mddev *mddev, char *page)
5774 {
5775 	if (mddev->external_size)
5776 		return sprintf(page, "%llu\n",
5777 			       (unsigned long long)mddev->array_sectors/2);
5778 	else
5779 		return sprintf(page, "default\n");
5780 }
5781 
5782 static ssize_t
5783 array_size_store(struct mddev *mddev, const char *buf, size_t len)
5784 {
5785 	sector_t sectors;
5786 	int err;
5787 
5788 	err = mddev_lock(mddev);
5789 	if (err)
5790 		return err;
5791 
5792 	/* cluster raid doesn't support changing array_sectors */
5793 	if (mddev_is_clustered(mddev)) {
5794 		mddev_unlock(mddev);
5795 		return -EINVAL;
5796 	}
5797 
5798 	if (strncmp(buf, "default", 7) == 0) {
5799 		if (mddev->pers)
5800 			sectors = mddev->pers->size(mddev, 0, 0);
5801 		else
5802 			sectors = mddev->array_sectors;
5803 
5804 		mddev->external_size = 0;
5805 	} else {
5806 		if (strict_blocks_to_sectors(buf, &sectors) < 0)
5807 			err = -EINVAL;
5808 		else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5809 			err = -E2BIG;
5810 		else
5811 			mddev->external_size = 1;
5812 	}
5813 
5814 	if (!err) {
5815 		mddev->array_sectors = sectors;
5816 		if (mddev->pers)
5817 			set_capacity_and_notify(mddev->gendisk,
5818 						mddev->array_sectors);
5819 	}
5820 	mddev_unlock(mddev);
5821 	return err ?: len;
5822 }
5823 
5824 static struct md_sysfs_entry md_array_size =
5825 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5826        array_size_store);
5827 
5828 static ssize_t
5829 consistency_policy_show(struct mddev *mddev, char *page)
5830 {
5831 	int ret;
5832 
5833 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5834 		ret = sprintf(page, "journal\n");
5835 	} else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5836 		ret = sprintf(page, "ppl\n");
5837 	} else if (mddev->bitmap) {
5838 		ret = sprintf(page, "bitmap\n");
5839 	} else if (mddev->pers) {
5840 		if (mddev->pers->sync_request)
5841 			ret = sprintf(page, "resync\n");
5842 		else
5843 			ret = sprintf(page, "none\n");
5844 	} else {
5845 		ret = sprintf(page, "unknown\n");
5846 	}
5847 
5848 	return ret;
5849 }
5850 
5851 static ssize_t
5852 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5853 {
5854 	int err = 0;
5855 
5856 	if (mddev->pers) {
5857 		if (mddev->pers->change_consistency_policy)
5858 			err = mddev->pers->change_consistency_policy(mddev, buf);
5859 		else
5860 			err = -EBUSY;
5861 	} else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5862 		set_bit(MD_HAS_PPL, &mddev->flags);
5863 	} else {
5864 		err = -EINVAL;
5865 	}
5866 
5867 	return err ? err : len;
5868 }
5869 
5870 static struct md_sysfs_entry md_consistency_policy =
5871 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5872        consistency_policy_store);
5873 
5874 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5875 {
5876 	return sprintf(page, "%d\n", test_bit(MD_FAILLAST_DEV, &mddev->flags));
5877 }
5878 
5879 /*
5880  * Setting MD_FAILLAST_DEV allows the last device to be forcibly removed
5881  * from RAID1/RAID10.
5882  */
5883 static ssize_t
5884 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5885 {
5886 	int ret;
5887 	bool value;
5888 
5889 	ret = kstrtobool(buf, &value);
5890 	if (ret)
5891 		return ret;
5892 
5893 	if (value)
5894 		set_bit(MD_FAILLAST_DEV, &mddev->flags);
5895 	else
5896 		clear_bit(MD_FAILLAST_DEV, &mddev->flags);
5897 
5898 	return len;
5899 }
5900 static struct md_sysfs_entry md_fail_last_dev =
5901 __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5902        fail_last_dev_store);
5903 
5904 static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
5905 {
5906 	if (mddev->pers == NULL || (mddev->pers->head.id != ID_RAID1))
5907 		return sprintf(page, "n/a\n");
5908 	else
5909 		return sprintf(page, "%d\n",
5910 			       test_bit(MD_SERIALIZE_POLICY, &mddev->flags));
5911 }
5912 
5913 /*
5914  * Setting MD_SERIALIZE_POLICY enforces that write IO is not reordered
5915  * for raid1.
5916  */
5917 static ssize_t
5918 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
5919 {
5920 	int err;
5921 	bool value;
5922 
5923 	err = kstrtobool(buf, &value);
5924 	if (err)
5925 		return err;
5926 
5927 	if (value == test_bit(MD_SERIALIZE_POLICY, &mddev->flags))
5928 		return len;
5929 
5930 	err = mddev_suspend_and_lock(mddev);
5931 	if (err)
5932 		return err;
5933 	if (mddev->pers == NULL || (mddev->pers->head.id != ID_RAID1)) {
5934 		pr_err("md: serialize_policy is only effective for raid1\n");
5935 		err = -EINVAL;
5936 		goto unlock;
5937 	}
5938 
5939 	if (value) {
5940 		mddev_create_serial_pool(mddev, NULL);
5941 		set_bit(MD_SERIALIZE_POLICY, &mddev->flags);
5942 	} else {
5943 		mddev_destroy_serial_pool(mddev, NULL);
5944 		clear_bit(MD_SERIALIZE_POLICY, &mddev->flags);
5945 	}
5946 unlock:
5947 	mddev_unlock_and_resume(mddev);
5948 	return err ?: len;
5949 }
5950 
5951 static struct md_sysfs_entry md_serialize_policy =
5952 __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
5953        serialize_policy_store);
5954 
5955 static int mddev_set_logical_block_size(struct mddev *mddev,
5956 				unsigned int lbs)
5957 {
5958 	int err = 0;
5959 	struct queue_limits lim;
5960 
5961 	if (queue_logical_block_size(mddev->gendisk->queue) >= lbs) {
5962 		pr_err("%s: Cannot set LBS smaller than mddev LBS %u\n",
5963 		       mdname(mddev), lbs);
5964 		return -EINVAL;
5965 	}
5966 
5967 	lim = queue_limits_start_update(mddev->gendisk->queue);
5968 	lim.logical_block_size = lbs;
5969 	pr_info("%s: logical_block_size is changed, data may be lost\n",
5970 		mdname(mddev));
5971 	err = queue_limits_commit_update(mddev->gendisk->queue, &lim);
5972 	if (err)
5973 		return err;
5974 
5975 	mddev->logical_block_size = lbs;
5976 	/* New lbs will be written to superblock after array is running */
5977 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
5978 	return 0;
5979 }
5980 
5981 static ssize_t
5982 lbs_show(struct mddev *mddev, char *page)
5983 {
5984 	return sprintf(page, "%u\n", mddev->logical_block_size);
5985 }
5986 
5987 static ssize_t
5988 lbs_store(struct mddev *mddev, const char *buf, size_t len)
5989 {
5990 	unsigned int lbs;
5991 	int err = -EBUSY;
5992 
5993 	/* Only 1.x meta supports configurable LBS */
5994 	if (mddev->major_version == 0)
5995 		return -EINVAL;
5996 
5997 	err = kstrtouint(buf, 10, &lbs);
5998 	if (err < 0)
5999 		return -EINVAL;
6000 
6001 	if (mddev->pers) {
6002 		unsigned int curr_lbs;
6003 
6004 		if (mddev->logical_block_size)
6005 			return -EBUSY;
6006 		/*
6007 		 * To fix forward compatibility issues, LBS is not
6008 		 * configured for arrays from old kernels (<=6.18) by default.
6009 		 * If the user confirms there will be no rollback to old
6010 		 * kernels, LBS can be enabled by writing the current LBS,
6011 		 * which prevents data loss from LBS changes.
6012 		 */
6013 		curr_lbs = queue_logical_block_size(mddev->gendisk->queue);
6014 		if (lbs != curr_lbs)
6015 			return -EINVAL;
6016 
6017 		mddev->logical_block_size = curr_lbs;
6018 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6019 		pr_info("%s: logical block size configured successfully, array will not be assembled in old kernels (<= 6.18)\n",
6020 			mdname(mddev));
6021 		return len;
6022 	}
6023 
6024 	err = mddev_lock(mddev);
6025 	if (err)
6026 		goto unlock;
6027 
6028 	err = mddev_set_logical_block_size(mddev, lbs);
6029 
6030 unlock:
6031 	mddev_unlock(mddev);
6032 	return err ?: len;
6033 }
6034 
6035 static struct md_sysfs_entry md_logical_block_size =
6036 __ATTR(logical_block_size, 0644, lbs_show, lbs_store);
6037 
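/*
 * Example (illustrative): on a running array carried over from an old
 * kernel, LBS is enabled by echoing back the current value, matching the
 * lbs_store() check above:
 *
 *	blockdev --getss /dev/md0	# prints e.g. 512
 *	echo 512 > /sys/block/md0/md/logical_block_size
 *
 * On a stopped array a larger value may be set instead, at the cost of
 * existing data.
 */
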
6038 static struct attribute *md_default_attrs[] = {
6039 	&md_level.attr,
6040 	&md_new_level.attr,
6041 	&md_bitmap_type.attr,
6042 	&md_layout.attr,
6043 	&md_raid_disks.attr,
6044 	&md_uuid.attr,
6045 	&md_chunk_size.attr,
6046 	&md_size.attr,
6047 	&md_resync_start.attr,
6048 	&md_metadata.attr,
6049 	&md_new_device.attr,
6050 	&md_safe_delay.attr,
6051 	&md_array_state.attr,
6052 	&md_reshape_position.attr,
6053 	&md_reshape_direction.attr,
6054 	&md_array_size.attr,
6055 	&max_corr_read_errors.attr,
6056 	&md_consistency_policy.attr,
6057 	&md_fail_last_dev.attr,
6058 	&md_serialize_policy.attr,
6059 	&md_logical_block_size.attr,
6060 	NULL,
6061 };
6062 
6063 static const struct attribute_group md_default_group = {
6064 	.attrs = md_default_attrs,
6065 };
6066 
6067 static struct attribute *md_redundancy_attrs[] = {
6068 	&md_scan_mode.attr,
6069 	&md_last_scan_mode.attr,
6070 	&md_mismatches.attr,
6071 	&md_sync_min.attr,
6072 	&md_sync_max.attr,
6073 	&md_sync_io_depth.attr,
6074 	&md_sync_speed.attr,
6075 	&md_sync_force_parallel.attr,
6076 	&md_sync_completed.attr,
6077 	&md_min_sync.attr,
6078 	&md_max_sync.attr,
6079 	&md_suspend_lo.attr,
6080 	&md_suspend_hi.attr,
6081 	&md_bitmap.attr,
6082 	&md_degraded.attr,
6083 	NULL,
6084 };
6085 static const struct attribute_group md_redundancy_group = {
6086 	.name = NULL,
6087 	.attrs = md_redundancy_attrs,
6088 };
6089 
6090 static const struct attribute_group *md_attr_groups[] = {
6091 	&md_default_group,
6092 	NULL,
6093 };
6094 
6095 static ssize_t
6096 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
6097 {
6098 	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
6099 	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
6100 	ssize_t rv;
6101 
6102 	if (!entry->show)
6103 		return -EIO;
6104 	spin_lock(&all_mddevs_lock);
6105 	if (!mddev_get(mddev)) {
6106 		spin_unlock(&all_mddevs_lock);
6107 		return -EBUSY;
6108 	}
6109 	spin_unlock(&all_mddevs_lock);
6110 
6111 	rv = entry->show(mddev, page);
6112 	mddev_put(mddev);
6113 	return rv;
6114 }
6115 
6116 static ssize_t
6117 md_attr_store(struct kobject *kobj, struct attribute *attr,
6118 	      const char *page, size_t length)
6119 {
6120 	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
6121 	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
6122 	ssize_t rv;
6123 	struct kernfs_node *kn = NULL;
6124 
6125 	if (!entry->store)
6126 		return -EIO;
6127 	if (!capable(CAP_SYS_ADMIN))
6128 		return -EACCES;
6129 
6130 	if (entry->store == array_state_store && cmd_match(page, "clear"))
6131 		kn = sysfs_break_active_protection(kobj, attr);
6132 
6133 	spin_lock(&all_mddevs_lock);
6134 	if (!mddev_get(mddev)) {
6135 		spin_unlock(&all_mddevs_lock);
6136 		if (kn)
6137 			sysfs_unbreak_active_protection(kn);
6138 		return -EBUSY;
6139 	}
6140 	spin_unlock(&all_mddevs_lock);
6141 	rv = entry->store(mddev, page, length);
6142 
6143 	/*
6144 	 * For "array_state=clear", dropping the extra kobject reference from
6145 	 * sysfs_break_active_protection() can trigger md kobject deletion.
6146 	 * Restore active protection before mddev_put() so deletion happens
6147 	 * after the sysfs write path fully unwinds.
6148 	 */
6149 	if (kn)
6150 		sysfs_unbreak_active_protection(kn);
6151 	mddev_put(mddev);
6152 
6153 	return rv;
6154 }
6155 
6156 static void md_kobj_release(struct kobject *ko)
6157 {
6158 	struct mddev *mddev = container_of(ko, struct mddev, kobj);
6159 
6160 	if (legacy_async_del_gendisk) {
6161 		if (mddev->sysfs_state)
6162 			sysfs_put(mddev->sysfs_state);
6163 		if (mddev->sysfs_level)
6164 			sysfs_put(mddev->sysfs_level);
6165 		del_gendisk(mddev->gendisk);
6166 	}
6167 	put_disk(mddev->gendisk);
6168 }
6169 
6170 static const struct sysfs_ops md_sysfs_ops = {
6171 	.show	= md_attr_show,
6172 	.store	= md_attr_store,
6173 };
6174 static const struct kobj_type md_ktype = {
6175 	.release	= md_kobj_release,
6176 	.sysfs_ops	= &md_sysfs_ops,
6177 	.default_groups	= md_attr_groups,
6178 };
6179 
6180 int mdp_major = 0;
6181 
6182 /* stack the limit for all rdevs into lim */
6183 int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
6184 		unsigned int flags)
6185 {
6186 	struct md_rdev *rdev;
6187 
6188 	rdev_for_each(rdev, mddev) {
6189 		queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset,
6190 					mddev->gendisk->disk_name);
6191 		if ((flags & MDDEV_STACK_INTEGRITY) &&
6192 		    !queue_limits_stack_integrity_bdev(lim, rdev->bdev))
6193 			return -EINVAL;
6194 	}
6195 
6196 	/*
6197 	 * Until RAID adds folio support, the logical_block_size
6198 	 * must not be larger than the page size.
6199 	 */
6200 	if (lim->logical_block_size > PAGE_SIZE) {
6201 		pr_err("%s: logical_block_size must not be larger than PAGE_SIZE\n",
6202 			mdname(mddev));
6203 		return -EINVAL;
6204 	}
6205 
6206 	/* Only 1.x meta needs to set logical block size */
6207 	if (mddev->major_version == 0)
6208 		return 0;
6209 
6210 	/*
6211 	 * Fix a forward compatibility issue: only set LBS by default for
6212 	 * new arrays; mddev->events == 0 indicates the array was just
6213 	 * created. When assembling an array, read LBS from the superblock
6214 	 * instead, since LBS is 0 in superblocks created by old kernels.
6215 	 */
6216 	if (!mddev->events) {
6217 		pr_info("%s: array will not be assembled in old kernels that lack configurable LBS support (<= 6.18)\n",
6218 			mdname(mddev));
6219 		mddev->logical_block_size = lim->logical_block_size;
6220 	}
6221 
6222 	if (!mddev->logical_block_size)
6223 		pr_warn("%s: echo current LBS to md/logical_block_size to prevent data loss issues from LBS changes.\n"
6224 			"\tNote: After setting, array will not be assembled in old kernels (<= 6.18)\n",
6225 			mdname(mddev));
6226 
6227 	return 0;
6228 }
6229 EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits);
6230 
6231 /* apply the extra stacking limits from a new rdev into mddev */
6232 int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev)
6233 {
6234 	struct queue_limits lim;
6235 
6236 	if (mddev_is_dm(mddev))
6237 		return 0;
6238 
6239 	if (queue_logical_block_size(rdev->bdev->bd_disk->queue) >
6240 	    queue_logical_block_size(mddev->gendisk->queue)) {
6241 		pr_err("%s: incompatible logical_block_size, can not add\n",
6242 		       mdname(mddev));
6243 		return -EINVAL;
6244 	}
6245 
6246 	lim = queue_limits_start_update(mddev->gendisk->queue);
6247 	queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset,
6248 				mddev->gendisk->disk_name);
6249 
6250 	if (!queue_limits_stack_integrity_bdev(&lim, rdev->bdev)) {
6251 		pr_err("%s: incompatible integrity profile for %pg\n",
6252 		       mdname(mddev), rdev->bdev);
6253 		queue_limits_cancel_update(mddev->gendisk->queue);
6254 		return -ENXIO;
6255 	}
6256 
6257 	return queue_limits_commit_update(mddev->gendisk->queue, &lim);
6258 }
6259 EXPORT_SYMBOL_GPL(mddev_stack_new_rdev);
6260 
6261 /* update the optimal I/O size after a reshape */
6262 void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes)
6263 {
6264 	struct queue_limits lim;
6265 
6266 	if (mddev_is_dm(mddev))
6267 		return;
6268 
6269 	/* don't bother updating io_opt if we can't suspend the array */
6270 	if (mddev_suspend(mddev, false) < 0)
6271 		return;
6272 	lim = queue_limits_start_update(mddev->gendisk->queue);
6273 	lim.io_opt = lim.io_min * nr_stripes;
6274 	queue_limits_commit_update(mddev->gendisk->queue, &lim);
6275 	mddev_resume(mddev);
6276 }
6277 EXPORT_SYMBOL_GPL(mddev_update_io_opt);
6278 
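/*
 * Worked example for mddev_update_io_opt(): with io_min equal to a 64KiB
 * chunk and nr_stripes == 4 (data disks in the new geometry), io_opt
 * becomes 256KiB, i.e. one full stripe per optimal I/O.
 */
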
6279 static void mddev_delayed_delete(struct work_struct *ws)
6280 {
6281 	struct mddev *mddev = container_of(ws, struct mddev, del_work);
6282 
6283 	kobject_put(&mddev->kobj);
6284 }
6285 
6286 void md_init_stacking_limits(struct queue_limits *lim)
6287 {
6288 	blk_set_stacking_limits(lim);
6289 	lim->features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
6290 			BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT;
6291 }
6292 EXPORT_SYMBOL_GPL(md_init_stacking_limits);
6293 
6294 struct mddev *md_alloc(dev_t dev, char *name)
6295 {
6296 	/*
6297 	 * If dev is zero, name is the name of a device to allocate with
6298 	 * an arbitrary minor number.  It will be "md_???"
6299 	 * If dev is non-zero it must be a device number with a MAJOR of
6300 	 * MD_MAJOR or mdp_major.  In this case, if "name" is NULL, then
6301 	 * the device is being created by opening a node in /dev.
6302 	 * If "name" is not NULL, the device is being created by
6303 	 * writing to /sys/module/md_mod/parameters/new_array.
6304 	 */
6305 	static DEFINE_MUTEX(disks_mutex);
6306 	struct mddev *mddev;
6307 	struct gendisk *disk;
6308 	int partitioned;
6309 	int shift;
6310 	int unit;
6311 	int error;
6312 
6313 	/*
6314 	 * Wait for any previous instance of this device to be completely
6315 	 * removed (mddev_delayed_delete).
6316 	 */
6317 	flush_workqueue(md_misc_wq);
6318 
6319 	mutex_lock(&disks_mutex);
6320 	mddev = mddev_alloc(dev);
6321 	if (IS_ERR(mddev)) {
6322 		error = PTR_ERR(mddev);
6323 		goto out_unlock;
6324 	}
6325 
6326 	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
6327 	shift = partitioned ? MdpMinorShift : 0;
6328 	unit = MINOR(mddev->unit) >> shift;
6329 
6330 	if (name && !dev) {
6331 		/* Need to ensure that 'name' is not a duplicate.
6332 		 */
6333 		struct mddev *mddev2;
6334 		spin_lock(&all_mddevs_lock);
6335 
6336 		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
6337 			if (mddev2->gendisk &&
6338 			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
6339 				spin_unlock(&all_mddevs_lock);
6340 				error = -EEXIST;
6341 				goto out_free_mddev;
6342 			}
6343 		spin_unlock(&all_mddevs_lock);
6344 	}
6345 	if (name && dev)
6346 		/*
6347 		 * Creating /dev/mdNNN via "new_array", so adjust hold_active.
6348 		 */
6349 		mddev->hold_active = UNTIL_STOP;
6350 
6351 	disk = blk_alloc_disk(NULL, NUMA_NO_NODE);
6352 	if (IS_ERR(disk)) {
6353 		error = PTR_ERR(disk);
6354 		goto out_free_mddev;
6355 	}
6356 
6357 	disk->major = MAJOR(mddev->unit);
6358 	disk->first_minor = unit << shift;
6359 	disk->minors = 1 << shift;
6360 	if (name)
6361 		strcpy(disk->disk_name, name);
6362 	else if (partitioned)
6363 		sprintf(disk->disk_name, "md_d%d", unit);
6364 	else
6365 		sprintf(disk->disk_name, "md%d", unit);
6366 	disk->fops = &md_fops;
6367 	disk->private_data = mddev;
6368 
6369 	disk->events |= DISK_EVENT_MEDIA_CHANGE;
6370 	mddev->gendisk = disk;
6371 	error = add_disk(disk);
6372 	if (error)
6373 		goto out_put_disk;
6374 
6375 	kobject_init(&mddev->kobj, &md_ktype);
6376 	error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
6377 	if (error) {
6378 		/*
6379 		 * The disk is already live at this point.  Clear the hold flag
6380 		 * and let mddev_put take care of the deletion, as it isn't any
6381 		 * different from a normal close on last release now.
6382 		 */
6383 		mddev->hold_active = 0;
6384 		mutex_unlock(&disks_mutex);
6385 		mddev_put(mddev);
6386 		return ERR_PTR(error);
6387 	}
6388 
6389 	kobject_uevent(&mddev->kobj, KOBJ_ADD);
6390 	mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
6391 	mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
6392 	mutex_unlock(&disks_mutex);
6393 	return mddev;
6394 
6395 out_put_disk:
6396 	put_disk(disk);
6397 out_free_mddev:
6398 	mddev_free(mddev);
6399 out_unlock:
6400 	mutex_unlock(&disks_mutex);
6401 	return ERR_PTR(error);
6402 }
6403 
6404 static int md_alloc_and_put(dev_t dev, char *name)
6405 {
6406 	struct mddev *mddev = md_alloc(dev, name);
6407 
6408 	if (legacy_async_del_gendisk)
6409 		pr_warn("md: async del_gendisk mode will be removed in future, please upgrade to mdadm-4.5+\n");
6410 
6411 	if (IS_ERR(mddev))
6412 		return PTR_ERR(mddev);
6413 	mddev_put(mddev);
6414 	return 0;
6415 }
6416 
6417 static void md_probe(dev_t dev)
6418 {
6419 	if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
6420 		return;
6421 	if (create_on_open)
6422 		md_alloc_and_put(dev, NULL);
6423 }
6424 
6425 static int add_named_array(const char *val, const struct kernel_param *kp)
6426 {
6427 	/*
6428 	 * val must be "md_*" or "mdNNN".
6429 	 * For "md_*" we allocate an array with a large free minor number, and
6430 	 * set the name to val.  val must not already be an active name.
6431 	 * For "mdNNN" we allocate an array with the minor number NNN
6432 	 * which must not already be in use.
6433 	 */
6434 	int len = strlen(val);
6435 	char buf[DISK_NAME_LEN];
6436 	unsigned long devnum;
6437 
6438 	while (len && val[len-1] == '\n')
6439 		len--;
6440 	if (len >= DISK_NAME_LEN)
6441 		return -E2BIG;
6442 	strscpy(buf, val, len+1);
6443 	if (strncmp(buf, "md_", 3) == 0)
6444 		return md_alloc_and_put(0, buf);
6445 	if (strncmp(buf, "md", 2) == 0 &&
6446 	    isdigit(buf[2]) &&
6447 	    kstrtoul(buf+2, 10, &devnum) == 0 &&
6448 	    devnum <= MINORMASK)
6449 		return md_alloc_and_put(MKDEV(MD_MAJOR, devnum), NULL);
6450 
6451 	return -EINVAL;
6452 }
6453 
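/*
 * Example (illustrative): add_named_array() backs the new_array module
 * parameter, so arrays can be pre-created by name or by minor:
 *
 *	echo md_home > /sys/module/md_mod/parameters/new_array
 *	echo md127   > /sys/module/md_mod/parameters/new_array
 */
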
6454 static void md_safemode_timeout(struct timer_list *t)
6455 {
6456 	struct mddev *mddev = timer_container_of(mddev, t, safemode_timer);
6457 
6458 	mddev->safemode = 1;
6459 	if (mddev->external)
6460 		sysfs_notify_dirent_safe(mddev->sysfs_state);
6461 
6462 	md_wakeup_thread(mddev->thread);
6463 }
6464 
6465 static int start_dirty_degraded;
6466 
6467 /*
6468  * Read bitmap superblock and return the bitmap_id based on disk version.
6469  * This is used as a fallback when the default bitmap version and the
6470  * on-disk version don't match, and mdadm is too old to set bitmap_type.
6471  */
6472 static enum md_submodule_id md_bitmap_get_id_from_sb(struct mddev *mddev)
6473 {
6474 	struct md_rdev *rdev;
6475 	struct page *sb_page;
6476 	bitmap_super_t *sb;
6477 	enum md_submodule_id id = ID_BITMAP_NONE;
6478 	sector_t sector;
6479 	u32 version;
6480 
6481 	if (!mddev->bitmap_info.offset)
6482 		return ID_BITMAP_NONE;
6483 
6484 	sb_page = alloc_page(GFP_KERNEL);
6485 	if (!sb_page) {
6486 		pr_warn("md: %s: failed to allocate memory for bitmap\n",
6487 			mdname(mddev));
6488 		return ID_BITMAP_NONE;
6489 	}
6490 
6491 	sector = mddev->bitmap_info.offset;
6492 
6493 	rdev_for_each(rdev, mddev) {
6494 		u32 iosize;
6495 
6496 		if (!test_bit(In_sync, &rdev->flags) ||
6497 		    test_bit(Faulty, &rdev->flags) ||
6498 		    test_bit(Bitmap_sync, &rdev->flags))
6499 			continue;
6500 
6501 		iosize = roundup(sizeof(bitmap_super_t),
6502 				 bdev_logical_block_size(rdev->bdev));
6503 		if (sync_page_io(rdev, sector, iosize, sb_page, REQ_OP_READ,
6504 				 true))
6505 			goto read_ok;
6506 	}
6507 	pr_warn("md: %s: failed to read bitmap from any device\n",
6508 		mdname(mddev));
6509 	goto out;
6510 
6511 read_ok:
6512 	sb = kmap_local_page(sb_page);
6513 	if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) {
6514 		pr_warn("md: %s: invalid bitmap magic 0x%x\n",
6515 			mdname(mddev), le32_to_cpu(sb->magic));
6516 		goto out_unmap;
6517 	}
6518 
6519 	version = le32_to_cpu(sb->version);
6520 	switch (version) {
6521 	case BITMAP_MAJOR_LO:
6522 	case BITMAP_MAJOR_HI:
6523 	case BITMAP_MAJOR_CLUSTERED:
6524 		id = ID_BITMAP;
6525 		break;
6526 	case BITMAP_MAJOR_LOCKLESS:
6527 		id = ID_LLBITMAP;
6528 		break;
6529 	default:
6530 		pr_warn("md: %s: unknown bitmap version %u\n",
6531 			mdname(mddev), version);
6532 		break;
6533 	}
6534 
6535 out_unmap:
6536 	kunmap_local(sb);
6537 out:
6538 	__free_page(sb_page);
6539 	return id;
6540 }
6541 
6542 static int md_bitmap_create(struct mddev *mddev)
6543 {
6544 	enum md_submodule_id orig_id = mddev->bitmap_id;
6545 	enum md_submodule_id sb_id;
6546 	int err;
6547 
6548 	if (mddev->bitmap_id == ID_BITMAP_NONE)
6549 		return -EINVAL;
6550 
6551 	if (!mddev_set_bitmap_ops(mddev))
6552 		return -ENOENT;
6553 
6554 	err = mddev->bitmap_ops->create(mddev);
6555 	if (!err)
6556 		return 0;
6557 
6558 	/*
6559 	 * Create failed. If the default bitmap version and the on-disk
6560 	 * version don't match, and mdadm is too old to set bitmap_type,
6561 	 * set bitmap_ops based on the on-disk version.
6562 	 */
6563 	mddev_clear_bitmap_ops(mddev);
6564 
6565 	sb_id = md_bitmap_get_id_from_sb(mddev);
6566 	if (sb_id == ID_BITMAP_NONE || sb_id == orig_id)
6567 		return err;
6568 
6569 	pr_info("md: %s: bitmap version mismatch, switching from %d to %d\n",
6570 		mdname(mddev), orig_id, sb_id);
6571 
6572 	mddev->bitmap_id = sb_id;
6573 	if (!mddev_set_bitmap_ops(mddev)) {
6574 		mddev->bitmap_id = orig_id;
6575 		return -ENOENT;
6576 	}
6577 
6578 	err = mddev->bitmap_ops->create(mddev);
6579 	if (err) {
6580 		mddev_clear_bitmap_ops(mddev);
6581 		mddev->bitmap_id = orig_id;
6582 	}
6583 
6584 	return err;
6585 }
6586 
6587 static void md_bitmap_destroy(struct mddev *mddev)
6588 {
6589 	if (!md_bitmap_registered(mddev))
6590 		return;
6591 
6592 	mddev->bitmap_ops->destroy(mddev);
6593 	mddev_clear_bitmap_ops(mddev);
6594 }
6595 
6596 int md_run(struct mddev *mddev)
6597 {
6598 	int err;
6599 	struct md_rdev *rdev;
6600 	struct md_personality *pers;
6601 	bool nowait = true;
6602 
6603 	if (list_empty(&mddev->disks))
6604 		/* cannot run an array with no devices.. */
6605 		return -EINVAL;
6606 
6607 	if (mddev->pers)
6608 		return -EBUSY;
6609 	/* Cannot run until previous stop completes properly */
6610 	if (mddev->sysfs_active)
6611 		return -EBUSY;
6612 
6613 	/*
6614 	 * Analyze all RAID superblock(s)
6615 	 */
6616 	if (!mddev->raid_disks) {
6617 		if (!mddev->persistent)
6618 			return -EINVAL;
6619 		err = analyze_sbs(mddev);
6620 		if (err)
6621 			return -EINVAL;
6622 	}
6623 
6624 	if (mddev->level != LEVEL_NONE)
6625 		request_module("md-level-%d", mddev->level);
6626 	else if (mddev->clevel[0])
6627 		request_module("md-%s", mddev->clevel);
6628 
6629 	/*
6630 	 * Drop all container device buffers; from now on
6631 	 * the only valid external interface is through the md
6632 	 * device.
6633 	 */
6634 	clear_bit(MD_HAS_SUPERBLOCK, &mddev->flags);
6635 	rdev_for_each(rdev, mddev) {
6636 		if (test_bit(Faulty, &rdev->flags))
6637 			continue;
6638 		sync_blockdev(rdev->bdev);
6639 		invalidate_bdev(rdev->bdev);
6640 		if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
6641 			mddev->ro = MD_RDONLY;
6642 			if (!mddev_is_dm(mddev))
6643 				set_disk_ro(mddev->gendisk, 1);
6644 		}
6645 
6646 		if (rdev->sb_page)
6647 			set_bit(MD_HAS_SUPERBLOCK, &mddev->flags);
6648 
6649 		/* perform some consistency tests on the device.
6650 		 * We don't want the data to overlap the metadata;
6651 		 * internal bitmap issues have been handled elsewhere.
6652 		 */
6653 		if (rdev->meta_bdev) {
6654 			/* Nothing to check */;
6655 		} else if (rdev->data_offset < rdev->sb_start) {
6656 			if (mddev->dev_sectors &&
6657 			    rdev->data_offset + mddev->dev_sectors
6658 			    > rdev->sb_start) {
6659 				pr_warn("md: %s: data overlaps metadata\n",
6660 					mdname(mddev));
6661 				return -EINVAL;
6662 			}
6663 		} else {
6664 			if (rdev->sb_start + rdev->sb_size/512
6665 			    > rdev->data_offset) {
6666 				pr_warn("md: %s: metadata overlaps data\n",
6667 					mdname(mddev));
6668 				return -EINVAL;
6669 			}
6670 		}
6671 		sysfs_notify_dirent_safe(rdev->sysfs_state);
6672 		nowait = nowait && bdev_nowait(rdev->bdev);
6673 	}
6674 
6675 	pers = get_pers(mddev->level, mddev->clevel);
6676 	if (!pers)
6677 		return -EINVAL;
6678 	if (mddev->level != pers->head.id) {
6679 		mddev->level = pers->head.id;
6680 		mddev->new_level = pers->head.id;
6681 	}
6682 	strscpy(mddev->clevel, pers->head.name, sizeof(mddev->clevel));
6683 
6684 	if (mddev->reshape_position != MaxSector &&
6685 	    pers->start_reshape == NULL) {
6686 		/* This personality cannot handle reshaping... */
6687 		put_pers(pers);
6688 		return -EINVAL;
6689 	}
6690 
6691 	if (pers->sync_request) {
6692 		/* Warn if this is a potentially silly
6693 		 * configuration.
6694 		 */
6695 		struct md_rdev *rdev2;
6696 		int warned = 0;
6697 
6698 		rdev_for_each(rdev, mddev)
6699 			rdev_for_each(rdev2, mddev) {
6700 				if (rdev < rdev2 &&
6701 				    rdev->bdev->bd_disk ==
6702 				    rdev2->bdev->bd_disk) {
6703 					pr_warn("%s: WARNING: %pg appears to be on the same physical disk as %pg.\n",
6704 						mdname(mddev),
6705 						rdev->bdev,
6706 						rdev2->bdev);
6707 					warned = 1;
6708 				}
6709 			}
6710 
6711 		if (warned)
6712 			pr_warn("True protection against single-disk failure might be compromised.\n");
6713 	}
6714 
6715 	/* dm-raid expects sync_thread to be frozen until resume */
6716 	if (mddev->gendisk)
6717 		mddev->recovery = 0;
6718 
6719 	/* may be overridden by the personality */
6720 	mddev->resync_max_sectors = mddev->dev_sectors;
6721 
6722 	mddev->ok_start_degraded = start_dirty_degraded;
6723 
6724 	if (start_readonly && md_is_rdwr(mddev))
6725 		mddev->ro = MD_AUTO_READ; /* read-only, but switch on first write */
6726 
6727 	err = pers->run(mddev);
6728 	if (err)
6729 		pr_warn("md: pers->run() failed ...\n");
6730 	else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
6731 		WARN_ONCE(!mddev->external_size,
6732 			  "%s: default size too small, but 'external_size' not in effect?\n",
6733 			  __func__);
6734 		pr_warn("md: invalid array_size %llu > default size %llu\n",
6735 			(unsigned long long)mddev->array_sectors / 2,
6736 			(unsigned long long)pers->size(mddev, 0, 0) / 2);
6737 		err = -EINVAL;
6738 	}
6739 	if (err == 0 && pers->sync_request &&
6740 	    (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
6741 		err = md_bitmap_create(mddev);
6742 		if (err)
6743 			pr_warn("%s: failed to create bitmap (%d)\n",
6744 				mdname(mddev), err);
6745 	}
6746 	if (err)
6747 		goto bitmap_abort;
6748 
6749 	if (mddev->bitmap_info.max_write_behind > 0) {
6750 		bool create_pool = false;
6751 
6752 		rdev_for_each(rdev, mddev) {
6753 			if (test_bit(WriteMostly, &rdev->flags) &&
6754 			    rdev_init_serial(rdev))
6755 				create_pool = true;
6756 		}
6757 		if (create_pool && mddev->serial_info_pool == NULL) {
6758 			mddev->serial_info_pool =
6759 				mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
6760 						    sizeof(struct serial_info));
6761 			if (!mddev->serial_info_pool) {
6762 				err = -ENOMEM;
6763 				goto bitmap_abort;
6764 			}
6765 		}
6766 	}
6767 
6768 	if (pers->sync_request) {
6769 		if (mddev->kobj.sd &&
6770 		    sysfs_create_group(&mddev->kobj, &md_redundancy_group))
6771 			pr_warn("md: cannot register extra attributes for %s\n",
6772 				mdname(mddev));
6773 		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
6774 		mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
6775 		mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
6776 	} else if (mddev->ro == MD_AUTO_READ)
6777 		mddev->ro = MD_RDWR;
6778 
6779 	atomic_set(&mddev->max_corr_read_errors,
6780 		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
6781 	mddev->safemode = 0;
6782 	if (mddev_is_clustered(mddev))
6783 		mddev->safemode_delay = 0;
6784 	else
6785 		mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
6786 	mddev->in_sync = 1;
6787 	smp_wmb();
6788 	spin_lock(&mddev->lock);
6789 	mddev->pers = pers;
6790 	spin_unlock(&mddev->lock);
6791 	rdev_for_each(rdev, mddev)
6792 		if (rdev->raid_disk >= 0)
6793 			sysfs_link_rdev(mddev, rdev); /* failure here is OK */
6794 
6795 	if (mddev->degraded && md_is_rdwr(mddev))
6796 		/* This ensures that recovering status is reported immediately
6797 		 * via sysfs - until a lack of spares is confirmed.
6798 		 */
6799 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6800 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6801 
6802 	if (mddev->sb_flags)
6803 		md_update_sb(mddev, 0);
6804 
6805 	md_new_event();
6806 	return 0;
6807 
6808 bitmap_abort:
6809 	mddev_detach(mddev);
6810 	if (mddev->private)
6811 		pers->free(mddev, mddev->private);
6812 	mddev->private = NULL;
6813 	put_pers(pers);
6814 	md_bitmap_destroy(mddev);
6815 	return err;
6816 }
6817 EXPORT_SYMBOL_GPL(md_run);
6818 
6819 int do_md_run(struct mddev *mddev)
6820 {
6821 	int err;
6822 
6823 	set_bit(MD_NOT_READY, &mddev->flags);
6824 	err = md_run(mddev);
6825 	if (err)
6826 		goto out;
6827 
6828 	if (md_bitmap_registered(mddev)) {
6829 		err = mddev->bitmap_ops->load(mddev);
6830 		if (err) {
6831 			md_bitmap_destroy(mddev);
6832 			goto out;
6833 		}
6834 	}
6835 
6836 	if (mddev_is_clustered(mddev))
6837 		md_allow_write(mddev);
6838 
6839 	/* run start up tasks that require md_thread */
6840 	md_start(mddev);
6841 
6842 	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
6843 
6844 	set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
6845 	clear_bit(MD_NOT_READY, &mddev->flags);
6846 	mddev->changed = 1;
6847 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
6848 	sysfs_notify_dirent_safe(mddev->sysfs_state);
6849 	sysfs_notify_dirent_safe(mddev->sysfs_action);
6850 	sysfs_notify_dirent_safe(mddev->sysfs_degraded);
6851 out:
6852 	clear_bit(MD_NOT_READY, &mddev->flags);
6853 	return err;
6854 }
6855 
6856 int md_start(struct mddev *mddev)
6857 {
6858 	int ret = 0;
6859 
6860 	if (mddev->pers->start) {
6861 		set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6862 		ret = mddev->pers->start(mddev);
6863 		clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6864 		md_wakeup_thread(mddev->sync_thread);
6865 	}
6866 	return ret;
6867 }
6868 EXPORT_SYMBOL_GPL(md_start);
6869 
6870 static int restart_array(struct mddev *mddev)
6871 {
6872 	struct gendisk *disk = mddev->gendisk;
6873 	struct md_rdev *rdev;
6874 	bool has_journal = false;
6875 	bool has_readonly = false;
6876 
6877 	/* Complain if it has no devices */
6878 	if (list_empty(&mddev->disks))
6879 		return -ENXIO;
6880 	if (!mddev->pers)
6881 		return -EINVAL;
6882 	if (md_is_rdwr(mddev))
6883 		return -EBUSY;
6884 
6885 	rcu_read_lock();
6886 	rdev_for_each_rcu(rdev, mddev) {
6887 		if (test_bit(Journal, &rdev->flags) &&
6888 		    !test_bit(Faulty, &rdev->flags))
6889 			has_journal = true;
6890 		if (rdev_read_only(rdev))
6891 			has_readonly = true;
6892 	}
6893 	rcu_read_unlock();
6894 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
6895 		/* Don't restart rw with journal missing/faulty */
6896 			return -EINVAL;
6897 	if (has_readonly)
6898 		return -EROFS;
6899 
6900 	mddev->safemode = 0;
6901 	mddev->ro = MD_RDWR;
6902 	set_disk_ro(disk, 0);
6903 	pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
6904 	/* Kick recovery or resync if necessary */
6905 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6906 	md_wakeup_thread(mddev->sync_thread);
6907 	sysfs_notify_dirent_safe(mddev->sysfs_state);
6908 	return 0;
6909 }
6910 
6911 static void md_clean(struct mddev *mddev)
6912 {
6913 	mddev->array_sectors = 0;
6914 	mddev->external_size = 0;
6915 	mddev->dev_sectors = 0;
6916 	mddev->raid_disks = 0;
6917 	mddev->resync_offset = 0;
6918 	mddev->resync_min = 0;
6919 	mddev->resync_max = MaxSector;
6920 	mddev->reshape_position = MaxSector;
6921 	/* we still need mddev->external in export_rdev, do not clear it yet */
6922 	mddev->persistent = 0;
6923 	mddev->level = LEVEL_NONE;
6924 	mddev->clevel[0] = 0;
6925 
6926 	/*
6927 	 * In legacy_async_del_gendisk mode, userspace can stop the array in
6928 	 * the middle of assembling it and still needs to access the array
6929 	 * afterwards, so MD_CLOSING must be cleared. Otherwise the array
6930 	 * cannot be opened again after stopping it, so MD_CLOSING is left
6931 	 * set.
6932 	 */
6933 	if (legacy_async_del_gendisk && mddev->hold_active) {
6934 		clear_bit(MD_CLOSING, &mddev->flags);
6935 	} else {
6936 		/* if UNTIL_STOP is set, it's cleared here */
6937 		mddev->hold_active = 0;
6938 		/* Don't clear MD_CLOSING, or mddev can be opened again. */
6939 		mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
6940 	}
6941 	mddev->sb_flags = 0;
6942 	mddev->ro = MD_RDWR;
6943 	mddev->metadata_type[0] = 0;
6944 	mddev->chunk_sectors = 0;
6945 	mddev->ctime = mddev->utime = 0;
6946 	mddev->layout = 0;
6947 	mddev->logical_block_size = 0;
6948 	mddev->max_disks = 0;
6949 	mddev->events = 0;
6950 	mddev->can_decrease_events = 0;
6951 	mddev->delta_disks = 0;
6952 	mddev->reshape_backwards = 0;
6953 	mddev->new_level = LEVEL_NONE;
6954 	mddev->new_layout = 0;
6955 	mddev->new_chunk_sectors = 0;
6956 	mddev->curr_resync = MD_RESYNC_NONE;
6957 	atomic64_set(&mddev->resync_mismatches, 0);
6958 	mddev->suspend_lo = mddev->suspend_hi = 0;
6959 	mddev->sync_speed_min = mddev->sync_speed_max = 0;
6960 	mddev->recovery = 0;
6961 	mddev->in_sync = 0;
6962 	mddev->changed = 0;
6963 	mddev->degraded = 0;
6964 	mddev->safemode = 0;
6965 	mddev->private = NULL;
6966 	mddev->cluster_info = NULL;
6967 	mddev->bitmap_info.offset = 0;
6968 	mddev->bitmap_info.default_offset = 0;
6969 	mddev->bitmap_info.default_space = 0;
6970 	mddev->bitmap_info.chunksize = 0;
6971 	mddev->bitmap_info.daemon_sleep = 0;
6972 	mddev->bitmap_info.max_write_behind = 0;
6973 	mddev->bitmap_info.nodes = 0;
6974 }
6975 
6976 static void __md_stop_writes(struct mddev *mddev)
6977 {
6978 	timer_delete_sync(&mddev->safemode_timer);
6979 
6980 	if (md_is_rdwr(mddev) || !mddev_is_dm(mddev)) {
6981 		if (mddev->pers && mddev->pers->quiesce) {
6982 			mddev->pers->quiesce(mddev, 1);
6983 			mddev->pers->quiesce(mddev, 0);
6984 		}
6985 
6986 		if (md_bitmap_enabled(mddev, true))
6987 			mddev->bitmap_ops->flush(mddev);
6988 	}
6989 
6990 	if (md_is_rdwr(mddev) &&
6991 	    ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
6992 	     mddev->sb_flags)) {
6993 		/* mark array as shutdown cleanly */
6994 		if (!mddev_is_clustered(mddev))
6995 			mddev->in_sync = 1;
6996 		md_update_sb(mddev, 1);
6997 	}
6998 	/* disable policy to guarantee rdevs free resources for serialization */
6999 	clear_bit(MD_SERIALIZE_POLICY, &mddev->flags);
7000 	mddev_destroy_serial_pool(mddev, NULL);
7001 }
7002 
7003 void md_stop_writes(struct mddev *mddev)
7004 {
7005 	mddev_lock_nointr(mddev);
7006 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
7007 	stop_sync_thread(mddev, true);
7008 	__md_stop_writes(mddev);
7009 	mddev_unlock(mddev);
7010 }
7011 EXPORT_SYMBOL_GPL(md_stop_writes);
7012 
7013 static void mddev_detach(struct mddev *mddev)
7014 {
7015 	if (md_bitmap_enabled(mddev, false))
7016 		mddev->bitmap_ops->wait_behind_writes(mddev);
7017 	if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) {
7018 		mddev->pers->quiesce(mddev, 1);
7019 		mddev->pers->quiesce(mddev, 0);
7020 	}
7021 	md_unregister_thread(mddev, &mddev->thread);
7022 
7023 	/* the unplug fn references 'conf' */
7024 	if (!mddev_is_dm(mddev))
7025 		blk_sync_queue(mddev->gendisk->queue);
7026 }
7027 
7028 static void __md_stop(struct mddev *mddev)
7029 {
7030 	struct md_personality *pers = mddev->pers;
7031 
7032 	md_bitmap_destroy(mddev);
7033 	mddev_detach(mddev);
7034 	spin_lock(&mddev->lock);
7035 	mddev->pers = NULL;
7036 	spin_unlock(&mddev->lock);
7037 	if (mddev->private)
7038 		pers->free(mddev, mddev->private);
7039 	mddev->private = NULL;
7040 	put_pers(pers);
7041 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
7042 }
7043 
7044 void md_stop(struct mddev *mddev)
7045 {
7046 	lockdep_assert_held(&mddev->reconfig_mutex);
7047 
7048 	/* stop the array and free any attached data structures.
7049 	 * This is called from dm-raid
7050 	 */
7051 	__md_stop_writes(mddev);
7052 	__md_stop(mddev);
7053 }
7054 
7055 EXPORT_SYMBOL_GPL(md_stop);
7056 
7057 /* ensure 'mddev->pers' exist before calling md_set_readonly() */
7058 static int md_set_readonly(struct mddev *mddev)
7059 {
7060 	int err = 0;
7061 	int did_freeze = 0;
7062 
7063 	if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
7064 		return -EBUSY;
7065 
7066 	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
7067 		did_freeze = 1;
7068 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
7069 	}
7070 
7071 	stop_sync_thread(mddev, false);
7072 	wait_event(mddev->sb_wait,
7073 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7074 	mddev_lock_nointr(mddev);
7075 
7076 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
7077 		pr_warn("md: %s still in use.\n",mdname(mddev));
7078 		err = -EBUSY;
7079 		goto out;
7080 	}
7081 
7082 	__md_stop_writes(mddev);
7083 
7084 	if (mddev->ro == MD_RDONLY) {
7085 		err  = -ENXIO;
7086 		goto out;
7087 	}
7088 
7089 	mddev->ro = MD_RDONLY;
7090 	set_disk_ro(mddev->gendisk, 1);
7091 
7092 out:
7093 	if (!err || did_freeze) {
7094 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
7095 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7096 		sysfs_notify_dirent_safe(mddev->sysfs_state);
7097 	}
7098 
7099 	return err;
7100 }
7101 
7102 /* mode:
7103  *   0 - completely stop and dis-assemble array
7104  *   2 - stop but do not disassemble array
7105  */
7106 static int do_md_stop(struct mddev *mddev, int mode)
7107 {
7108 	struct gendisk *disk = mddev->gendisk;
7109 	struct md_rdev *rdev;
7110 	int did_freeze = 0;
7111 
7112 	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
7113 		did_freeze = 1;
7114 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
7115 	}
7116 
7117 	stop_sync_thread(mddev, true);
7118 
7119 	if (mddev->sysfs_active ||
7120 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
7121 		pr_warn("md: %s still in use.\n",mdname(mddev));
7122 		if (did_freeze) {
7123 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
7124 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7125 		}
7126 		return -EBUSY;
7127 	}
7128 	if (mddev->pers) {
7129 		if (!md_is_rdwr(mddev))
7130 			set_disk_ro(disk, 0);
7131 
7132 		if (mode == 2 && mddev->pers->sync_request &&
7133 		    mddev->to_remove == NULL)
7134 			mddev->to_remove = &md_redundancy_group;
7135 
7136 		__md_stop_writes(mddev);
7137 		__md_stop(mddev);
7138 
7139 		/* tell userspace to handle 'inactive' */
7140 		sysfs_notify_dirent_safe(mddev->sysfs_state);
7141 
7142 		rdev_for_each(rdev, mddev)
7143 			if (rdev->raid_disk >= 0)
7144 				sysfs_unlink_rdev(mddev, rdev);
7145 
7146 		set_capacity_and_notify(disk, 0);
7147 		mddev->changed = 1;
7148 
7149 		if (!md_is_rdwr(mddev))
7150 			mddev->ro = MD_RDWR;
7151 	}
7152 	/*
7153 	 * Free resources if final stop
7154 	 */
7155 	if (mode == 0) {
7156 		pr_info("md: %s stopped.\n", mdname(mddev));
7157 
7158 		if (mddev->bitmap_info.file) {
7159 			struct file *f = mddev->bitmap_info.file;
7160 			spin_lock(&mddev->lock);
7161 			mddev->bitmap_info.file = NULL;
7162 			spin_unlock(&mddev->lock);
7163 			fput(f);
7164 		}
7165 		mddev->bitmap_info.offset = 0;
7166 
7167 		export_array(mddev);
7168 		md_clean(mddev);
7169 		if (!legacy_async_del_gendisk)
7170 			set_bit(MD_DELETED, &mddev->flags);
7171 	}
7172 	md_new_event();
7173 	sysfs_notify_dirent_safe(mddev->sysfs_state);
7174 	return 0;
7175 }
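
/*
 * mode 0 is what the STOP_ARRAY ioctl requests; mode 2 is used by the
 * sysfs array_state handler to take the array 'inactive' without
 * disassembling it.
 */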
7176 
7177 #ifndef MODULE
7178 static void autorun_array(struct mddev *mddev)
7179 {
7180 	struct md_rdev *rdev;
7181 	int err;
7182 
7183 	if (list_empty(&mddev->disks))
7184 		return;
7185 
7186 	pr_info("md: running: ");
7187 
7188 	rdev_for_each(rdev, mddev) {
7189 		pr_cont("<%pg>", rdev->bdev);
7190 	}
7191 	pr_cont("\n");
7192 
7193 	err = do_md_run(mddev);
7194 	if (err) {
7195 		pr_warn("md: do_md_run() returned %d\n", err);
7196 		do_md_stop(mddev, 0);
7197 	}
7198 }
7199 
7200 /*
7201  * let's try to run arrays based on all disks that have arrived
7202  * until now. (those are in pending_raid_disks)
7203  *
7204  * the method: pick the first pending disk, collect all disks with
7205  * the same UUID, remove all from the pending list and put them into
7206  * the 'same_array' list. Then order this list based on superblock
7207  * update time (freshest comes first), kick out 'old' disks and
7208  * compare superblocks. If everything's fine then run it.
7209  *
7210  * If "unit" is allocated, then bump its reference count
7211  */
7212 static void autorun_devices(int part)
7213 {
7214 	struct md_rdev *rdev0, *rdev, *tmp;
7215 	struct mddev *mddev;
7216 
7217 	pr_info("md: autorun ...\n");
7218 	while (!list_empty(&pending_raid_disks)) {
7219 		int unit;
7220 		dev_t dev;
7221 		LIST_HEAD(candidates);
7222 		rdev0 = list_entry(pending_raid_disks.next,
7223 					 struct md_rdev, same_set);
7224 
7225 		pr_debug("md: considering %pg ...\n", rdev0->bdev);
7226 		INIT_LIST_HEAD(&candidates);
7227 		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
7228 			if (super_90_load(rdev, rdev0, 0) >= 0) {
7229 				pr_debug("md:  adding %pg ...\n",
7230 					 rdev->bdev);
7231 				list_move(&rdev->same_set, &candidates);
7232 			}
7233 		/*
7234 		 * now we have a set of devices, with all of them having
7235 		 * mostly sane superblocks. It's time to allocate the
7236 		 * mddev.
7237 		 */
7238 		if (part) {
7239 			dev = MKDEV(mdp_major,
7240 				    rdev0->preferred_minor << MdpMinorShift);
7241 			unit = MINOR(dev) >> MdpMinorShift;
7242 		} else {
7243 			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
7244 			unit = MINOR(dev);
7245 		}
7246 		if (rdev0->preferred_minor != unit) {
7247 			pr_warn("md: unit number in %pg is bad: %d\n",
7248 				rdev0->bdev, rdev0->preferred_minor);
7249 			break;
7250 		}
7251 
7252 		mddev = md_alloc(dev, NULL);
7253 		if (IS_ERR(mddev))
7254 			break;
7255 
7256 		if (mddev_suspend_and_lock(mddev))
7257 			pr_warn("md: %s locked, cannot run\n", mdname(mddev));
7258 		else if (mddev->raid_disks || mddev->major_version
7259 			 || !list_empty(&mddev->disks)) {
7260 			pr_warn("md: %s already running, cannot run %pg\n",
7261 				mdname(mddev), rdev0->bdev);
7262 			mddev_unlock_and_resume(mddev);
7263 		} else {
7264 			pr_debug("md: created %s\n", mdname(mddev));
7265 			mddev->persistent = 1;
7266 			rdev_for_each_list(rdev, tmp, &candidates) {
7267 				list_del_init(&rdev->same_set);
7268 				if (bind_rdev_to_array(rdev, mddev))
7269 					export_rdev(rdev);
7270 			}
7271 			autorun_array(mddev);
7272 			mddev_unlock_and_resume(mddev);
7273 		}
7274 		/* on success, candidates will be empty, on error
7275 		 * it won't...
7276 		 */
7277 		rdev_for_each_list(rdev, tmp, &candidates) {
7278 			list_del_init(&rdev->same_set);
7279 			export_rdev(rdev);
7280 		}
7281 		mddev_put(mddev);
7282 	}
7283 	pr_info("md: ... autorun DONE.\n");
7284 }
7285 #endif /* !MODULE */
7286 
7287 static int get_version(void __user *arg)
7288 {
7289 	mdu_version_t ver;
7290 
7291 	ver.major = MD_MAJOR_VERSION;
7292 	ver.minor = MD_MINOR_VERSION;
7293 	ver.patchlevel = MD_PATCHLEVEL_VERSION;
7294 
7295 	if (copy_to_user(arg, &ver, sizeof(ver)))
7296 		return -EFAULT;
7297 
7298 	return 0;
7299 }
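
/*
 * The matching userspace side (illustrative, not part of this file):
 *
 *	mdu_version_t ver;
 *	int fd = open("/dev/md0", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, RAID_VERSION, &ver) == 0)
 *		printf("md driver %d.%d.%d\n",
 *		       ver.major, ver.minor, ver.patchlevel);
 */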
7300 
7301 static int get_array_info(struct mddev *mddev, void __user *arg)
7302 {
7303 	mdu_array_info_t info;
7304 	int nr,working,insync,failed,spare;
7305 	struct md_rdev *rdev;
7306 
7307 	nr = working = insync = failed = spare = 0;
7308 	rcu_read_lock();
7309 	rdev_for_each_rcu(rdev, mddev) {
7310 		nr++;
7311 		if (test_bit(Faulty, &rdev->flags))
7312 			failed++;
7313 		else {
7314 			working++;
7315 			if (test_bit(In_sync, &rdev->flags))
7316 				insync++;
7317 			else if (test_bit(Journal, &rdev->flags))
7318 				/* TODO: add journal count to md_u.h */
7319 				;
7320 			else
7321 				spare++;
7322 		}
7323 	}
7324 	rcu_read_unlock();
7325 
7326 	info.major_version = mddev->major_version;
7327 	info.minor_version = mddev->minor_version;
7328 	info.patch_version = MD_PATCHLEVEL_VERSION;
7329 	info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
7330 	info.level         = mddev->level;
7331 	info.size          = mddev->dev_sectors / 2;
7332 	if (info.size != mddev->dev_sectors / 2) /* overflow */
7333 		info.size = -1;
7334 	info.nr_disks      = nr;
7335 	info.raid_disks    = mddev->raid_disks;
7336 	info.md_minor      = mddev->md_minor;
7337 	info.not_persistent= !mddev->persistent;
7338 
7339 	info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
7340 	info.state         = 0;
7341 	if (mddev->in_sync)
7342 		info.state = (1<<MD_SB_CLEAN);
7343 	if (mddev->bitmap && mddev->bitmap_info.offset)
7344 		info.state |= (1<<MD_SB_BITMAP_PRESENT);
7345 	if (mddev_is_clustered(mddev))
7346 		info.state |= (1<<MD_SB_CLUSTERED);
7347 	info.active_disks  = insync;
7348 	info.working_disks = working;
7349 	info.failed_disks  = failed;
7350 	info.spare_disks   = spare;
7351 
7352 	info.layout        = mddev->layout;
7353 	info.chunk_size    = mddev->chunk_sectors << 9;
7354 
7355 	if (copy_to_user(arg, &info, sizeof(info)))
7356 		return -EFAULT;
7357 
7358 	return 0;
7359 }
7360 
7361 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
7362 {
7363 	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
7364 	char *ptr;
7365 	int err;
7366 
7367 	file = kzalloc_obj(*file, GFP_NOIO);
7368 	if (!file)
7369 		return -ENOMEM;
7370 
7371 	err = 0;
7372 	spin_lock(&mddev->lock);
7373 	/* bitmap enabled */
7374 	if (mddev->bitmap_info.file) {
7375 		ptr = file_path(mddev->bitmap_info.file, file->pathname,
7376 				sizeof(file->pathname));
7377 		if (IS_ERR(ptr))
7378 			err = PTR_ERR(ptr);
7379 		else
7380 			memmove(file->pathname, ptr,
7381 				sizeof(file->pathname)-(ptr-file->pathname));
7382 	}
7383 	spin_unlock(&mddev->lock);
7384 
7385 	if (err == 0 &&
7386 	    copy_to_user(arg, file, sizeof(*file)))
7387 		err = -EFAULT;
7388 
7389 	kfree(file);
7390 	return err;
7391 }
7392 
7393 static int get_disk_info(struct mddev *mddev, void __user * arg)
7394 {
7395 	mdu_disk_info_t info;
7396 	struct md_rdev *rdev;
7397 
7398 	if (copy_from_user(&info, arg, sizeof(info)))
7399 		return -EFAULT;
7400 
7401 	rcu_read_lock();
7402 	rdev = md_find_rdev_nr_rcu(mddev, info.number);
7403 	if (rdev) {
7404 		info.major = MAJOR(rdev->bdev->bd_dev);
7405 		info.minor = MINOR(rdev->bdev->bd_dev);
7406 		info.raid_disk = rdev->raid_disk;
7407 		info.state = 0;
7408 		if (test_bit(Faulty, &rdev->flags))
7409 			info.state |= (1<<MD_DISK_FAULTY);
7410 		else if (test_bit(In_sync, &rdev->flags)) {
7411 			info.state |= (1<<MD_DISK_ACTIVE);
7412 			info.state |= (1<<MD_DISK_SYNC);
7413 		}
7414 		if (test_bit(Journal, &rdev->flags))
7415 			info.state |= (1<<MD_DISK_JOURNAL);
7416 		if (test_bit(WriteMostly, &rdev->flags))
7417 			info.state |= (1<<MD_DISK_WRITEMOSTLY);
7418 		if (test_bit(FailFast, &rdev->flags))
7419 			info.state |= (1<<MD_DISK_FAILFAST);
7420 	} else {
7421 		info.major = info.minor = 0;
7422 		info.raid_disk = -1;
7423 		info.state = (1<<MD_DISK_REMOVED);
7424 	}
7425 	rcu_read_unlock();
7426 
7427 	if (copy_to_user(arg, &info, sizeof(info)))
7428 		return -EFAULT;
7429 
7430 	return 0;
7431 }
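
/*
 * Illustrative userspace query (not part of this file): GET_DISK_INFO
 * is keyed on the 'number' field copied in above, e.g.:
 *
 *	mdu_disk_info_t di = { .number = 0 };
 *
 *	if (ioctl(fd, GET_DISK_INFO, &di) == 0)
 *		printf("slot %d state %#x\n", di.raid_disk, di.state);
 */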
7432 
7433 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
7434 {
7435 	struct md_rdev *rdev;
7436 	dev_t dev = MKDEV(info->major,info->minor);
7437 
7438 	if (mddev_is_clustered(mddev) &&
7439 		!(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
7440 		pr_warn("%s: Cannot add to clustered mddev.\n",
7441 			mdname(mddev));
7442 		return -EINVAL;
7443 	}
7444 
7445 	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
7446 		return -EOVERFLOW;
7447 
7448 	if (!mddev->raid_disks) {
7449 		int err;
7450 		/* expecting a device which has a superblock */
7451 		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
7452 		if (IS_ERR(rdev)) {
7453 			pr_warn("md: md_import_device returned %ld\n",
7454 				PTR_ERR(rdev));
7455 			return PTR_ERR(rdev);
7456 		}
7457 		if (!list_empty(&mddev->disks)) {
7458 			struct md_rdev *rdev0
7459 				= list_entry(mddev->disks.next,
7460 					     struct md_rdev, same_set);
7461 			err = super_types[mddev->major_version]
7462 				.load_super(rdev, rdev0, mddev->minor_version);
7463 			if (err < 0) {
7464 				pr_warn("md: %pg has different UUID to %pg\n",
7465 					rdev->bdev,
7466 					rdev0->bdev);
7467 				export_rdev(rdev);
7468 				return -EINVAL;
7469 			}
7470 		}
7471 		err = bind_rdev_to_array(rdev, mddev);
7472 		if (err)
7473 			export_rdev(rdev);
7474 		return err;
7475 	}
7476 
7477 	/*
7478 	 * md_add_new_disk can be used once the array is assembled
7479 	 * to add "hot spares".  They must already have a superblock
7480 	 * written
7481 	 */
7482 	if (mddev->pers) {
7483 		int err;
7484 		if (!mddev->pers->hot_add_disk) {
7485 			pr_warn("%s: personality does not support diskops!\n",
7486 				mdname(mddev));
7487 			return -EINVAL;
7488 		}
7489 		if (mddev->persistent)
7490 			rdev = md_import_device(dev, mddev->major_version,
7491 						mddev->minor_version);
7492 		else
7493 			rdev = md_import_device(dev, -1, -1);
7494 		if (IS_ERR(rdev)) {
7495 			pr_warn("md: md_import_device returned %ld\n",
7496 				PTR_ERR(rdev));
7497 			return PTR_ERR(rdev);
7498 		}
7499 		/* set saved_raid_disk if appropriate */
7500 		if (!mddev->persistent) {
7501 			if (info->state & (1<<MD_DISK_SYNC)  &&
7502 			    info->raid_disk < mddev->raid_disks) {
7503 				rdev->raid_disk = info->raid_disk;
7504 				clear_bit(Bitmap_sync, &rdev->flags);
7505 			} else
7506 				rdev->raid_disk = -1;
7507 			rdev->saved_raid_disk = rdev->raid_disk;
7508 		} else
7509 			super_types[mddev->major_version].
7510 				validate_super(mddev, NULL/*freshest*/, rdev);
7511 		if ((info->state & (1<<MD_DISK_SYNC)) &&
7512 		     rdev->raid_disk != info->raid_disk) {
7513 			/* This was a hot-add request, but the event counts
7514 			 * don't match, so reject it.
7515 			 */
7516 			export_rdev(rdev);
7517 			return -EINVAL;
7518 		}
7519 
7520 		clear_bit(In_sync, &rdev->flags); /* just to be sure */
7521 		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
7522 			set_bit(WriteMostly, &rdev->flags);
7523 		else
7524 			clear_bit(WriteMostly, &rdev->flags);
7525 		if (info->state & (1<<MD_DISK_FAILFAST))
7526 			set_bit(FailFast, &rdev->flags);
7527 		else
7528 			clear_bit(FailFast, &rdev->flags);
7529 
7530 		if (info->state & (1<<MD_DISK_JOURNAL)) {
7531 			struct md_rdev *rdev2;
7532 			bool has_journal = false;
7533 
7534 			/* make sure no existing journal disk */
7535 			rdev_for_each(rdev2, mddev) {
7536 				if (test_bit(Journal, &rdev2->flags)) {
7537 					has_journal = true;
7538 					break;
7539 				}
7540 			}
7541 			if (has_journal || mddev->bitmap) {
7542 				export_rdev(rdev);
7543 				return -EBUSY;
7544 			}
7545 			set_bit(Journal, &rdev->flags);
7546 		}
7547 		/*
7548 		 * check whether the device shows up in other nodes
7549 		 */
7550 		if (mddev_is_clustered(mddev)) {
7551 			if (info->state & (1 << MD_DISK_CANDIDATE))
7552 				set_bit(Candidate, &rdev->flags);
7553 			else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
7554 				/* --add initiated by this node */
7555 				err = mddev->cluster_ops->add_new_disk(mddev, rdev);
7556 				if (err) {
7557 					export_rdev(rdev);
7558 					return err;
7559 				}
7560 			}
7561 		}
7562 
7563 		rdev->raid_disk = -1;
7564 		err = bind_rdev_to_array(rdev, mddev);
7565 
7566 		if (err)
7567 			export_rdev(rdev);
7568 
7569 		if (mddev_is_clustered(mddev)) {
7570 			if (info->state & (1 << MD_DISK_CANDIDATE)) {
7571 				if (!err) {
7572 					err = mddev->cluster_ops->new_disk_ack(
7573 							mddev, err == 0);
7574 					if (err)
7575 						md_kick_rdev_from_array(rdev);
7576 				}
7577 			} else {
7578 				if (err)
7579 					mddev->cluster_ops->add_new_disk_cancel(mddev);
7580 				else
7581 					err = add_bound_rdev(rdev);
7582 			}
7583 
7584 		} else if (!err)
7585 			err = add_bound_rdev(rdev);
7586 
7587 		return err;
7588 	}
7589 
7590 	/* otherwise, md_add_new_disk is only allowed
7591 	 * for major_version==0 superblocks
7592 	 */
7593 	if (mddev->major_version != 0) {
7594 		pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
7595 		return -EINVAL;
7596 	}
7597 
7598 	if (!(info->state & (1<<MD_DISK_FAULTY))) {
7599 		int err;
7600 		rdev = md_import_device(dev, -1, 0);
7601 		if (IS_ERR(rdev)) {
7602 			pr_warn("md: error, md_import_device() returned %ld\n",
7603 				PTR_ERR(rdev));
7604 			return PTR_ERR(rdev);
7605 		}
7606 		rdev->desc_nr = info->number;
7607 		if (info->raid_disk < mddev->raid_disks)
7608 			rdev->raid_disk = info->raid_disk;
7609 		else
7610 			rdev->raid_disk = -1;
7611 
7612 		if (rdev->raid_disk < mddev->raid_disks)
7613 			if (info->state & (1<<MD_DISK_SYNC))
7614 				set_bit(In_sync, &rdev->flags);
7615 
7616 		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
7617 			set_bit(WriteMostly, &rdev->flags);
7618 		if (info->state & (1<<MD_DISK_FAILFAST))
7619 			set_bit(FailFast, &rdev->flags);
7620 
7621 		if (!mddev->persistent) {
7622 			pr_debug("md: nonpersistent superblock ...\n");
7623 			rdev->sb_start = bdev_nr_sectors(rdev->bdev);
7624 		} else
7625 			rdev->sb_start = calc_dev_sboffset(rdev);
7626 		rdev->sectors = rdev->sb_start;
7627 
7628 		err = bind_rdev_to_array(rdev, mddev);
7629 		if (err) {
7630 			export_rdev(rdev);
7631 			return err;
7632 		}
7633 	}
7634 
7635 	return 0;
7636 }
7637 
7638 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
7639 {
7640 	struct md_rdev *rdev;
7641 
7642 	if (!mddev->pers)
7643 		return -ENODEV;
7644 
7645 	rdev = find_rdev(mddev, dev);
7646 	if (!rdev)
7647 		return -ENXIO;
7648 
7649 	if (rdev->raid_disk < 0)
7650 		goto kick_rdev;
7651 
7652 	clear_bit(Blocked, &rdev->flags);
7653 	remove_and_add_spares(mddev, rdev);
7654 
7655 	if (rdev->raid_disk >= 0)
7656 		goto busy;
7657 
7658 kick_rdev:
7659 	if (mddev_is_clustered(mddev) &&
7660 	    mddev->cluster_ops->remove_disk(mddev, rdev))
7661 		goto busy;
7662 
7663 	md_kick_rdev_from_array(rdev);
7664 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7665 	if (!mddev->thread)
7666 		md_update_sb(mddev, 1);
7667 	md_new_event();
7668 
7669 	return 0;
7670 busy:
7671 	pr_debug("md: cannot remove active disk %pg from %s ...\n",
7672 		 rdev->bdev, mdname(mddev));
7673 	return -EBUSY;
7674 }
7675 
7676 static int hot_add_disk(struct mddev *mddev, dev_t dev)
7677 {
7678 	int err;
7679 	struct md_rdev *rdev;
7680 
7681 	if (!mddev->pers)
7682 		return -ENODEV;
7683 
7684 	if (mddev->major_version != 0) {
7685 		pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
7686 			mdname(mddev));
7687 		return -EINVAL;
7688 	}
7689 	if (!mddev->pers->hot_add_disk) {
7690 		pr_warn("%s: personality does not support diskops!\n",
7691 			mdname(mddev));
7692 		return -EINVAL;
7693 	}
7694 
7695 	rdev = md_import_device(dev, -1, 0);
7696 	if (IS_ERR(rdev)) {
7697 		pr_warn("md: error, md_import_device() returned %ld\n",
7698 			PTR_ERR(rdev));
7699 		return -EINVAL;
7700 	}
7701 
7702 	if (mddev->persistent)
7703 		rdev->sb_start = calc_dev_sboffset(rdev);
7704 	else
7705 		rdev->sb_start = bdev_nr_sectors(rdev->bdev);
7706 
7707 	rdev->sectors = rdev->sb_start;
7708 
7709 	if (test_bit(Faulty, &rdev->flags)) {
7710 		pr_warn("md: can not hot-add faulty %pg disk to %s!\n",
7711 			rdev->bdev, mdname(mddev));
7712 		err = -EINVAL;
7713 		goto abort_export;
7714 	}
7715 
7716 	clear_bit(In_sync, &rdev->flags);
7717 	rdev->desc_nr = -1;
7718 	rdev->saved_raid_disk = -1;
7719 	err = bind_rdev_to_array(rdev, mddev);
7720 	if (err)
7721 		goto abort_export;
7722 
7723 	/*
7724 	 * The rest should better be atomic, we can have disk failures
7725 	 * noticed in interrupt contexts ...
7726 	 */
7727 
7728 	rdev->raid_disk = -1;
7729 
7730 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7731 	if (!mddev->thread)
7732 		md_update_sb(mddev, 1);
7733 	/*
7734 	 * Kick recovery, maybe this spare has to be added to the
7735 	 * array immediately.
7736 	 */
7737 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7738 	md_new_event();
7739 	return 0;
7740 
7741 abort_export:
7742 	export_rdev(rdev);
7743 	return err;
7744 }
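
/*
 * Illustrative trigger (mdadm issues the equivalent): the device is
 * passed in the ioctl argument itself and decoded by new_decode_dev()
 * in md_ioctl() below:
 *
 *	ioctl(fd, HOT_ADD_DISK, (unsigned long)makedev(8, 16));
 */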
7745 
7746 static int set_bitmap_file(struct mddev *mddev, int fd)
7747 {
7748 	int err = 0;
7749 
7750 	if (!md_bitmap_registered(mddev))
7751 		return -EINVAL;
7752 
7753 	if (mddev->pers) {
7754 		if (!mddev->pers->quiesce || !mddev->thread)
7755 			return -EBUSY;
7756 		if (mddev->recovery || mddev->sync_thread)
7757 			return -EBUSY;
7758 		/* we should be able to change the bitmap.. */
7759 	}
7760 
7761 	if (fd >= 0) {
7762 		struct inode *inode;
7763 		struct file *f;
7764 
7765 		if (mddev->bitmap || mddev->bitmap_info.file)
7766 			return -EEXIST; /* cannot add when bitmap is present */
7767 
7768 		if (!IS_ENABLED(CONFIG_MD_BITMAP_FILE)) {
7769 			pr_warn("%s: bitmap files not supported by this kernel\n",
7770 				mdname(mddev));
7771 			return -EINVAL;
7772 		}
7773 		pr_warn("%s: using deprecated bitmap file support\n",
7774 			mdname(mddev));
7775 
7776 		f = fget(fd);
7777 
7778 		if (f == NULL) {
7779 			pr_warn("%s: error: failed to get bitmap file\n",
7780 				mdname(mddev));
7781 			return -EBADF;
7782 		}
7783 
7784 		inode = f->f_mapping->host;
7785 		if (!S_ISREG(inode->i_mode)) {
7786 			pr_warn("%s: error: bitmap file must be a regular file\n",
7787 				mdname(mddev));
7788 			err = -EBADF;
7789 		} else if (!(f->f_mode & FMODE_WRITE)) {
7790 			pr_warn("%s: error: bitmap file must open for write\n",
7791 				mdname(mddev));
7792 			err = -EBADF;
7793 		} else if (atomic_read(&inode->i_writecount) != 1) {
7794 			pr_warn("%s: error: bitmap file is already in use\n",
7795 				mdname(mddev));
7796 			err = -EBUSY;
7797 		}
7798 		if (err) {
7799 			fput(f);
7800 			return err;
7801 		}
7802 		mddev->bitmap_info.file = f;
7803 		mddev->bitmap_info.offset = 0; /* file overrides offset */
7804 	} else if (mddev->bitmap == NULL)
7805 		return -ENOENT; /* cannot remove what isn't there */
7806 	err = 0;
7807 	if (mddev->pers) {
7808 		if (fd >= 0) {
7809 			err = md_bitmap_create(mddev);
7810 			if (!err)
7811 				err = mddev->bitmap_ops->load(mddev);
7812 
7813 			if (err) {
7814 				md_bitmap_destroy(mddev);
7815 				fd = -1;
7816 			}
7817 		} else if (fd < 0) {
7818 			md_bitmap_destroy(mddev);
7819 		}
7820 	}
7821 
7822 	if (fd < 0) {
7823 		struct file *f = mddev->bitmap_info.file;
7824 		if (f) {
7825 			spin_lock(&mddev->lock);
7826 			mddev->bitmap_info.file = NULL;
7827 			spin_unlock(&mddev->lock);
7828 			fput(f);
7829 		}
7830 	}
7831 
7832 	return err;
7833 }
7834 
7835 /*
7836  * md_set_array_info is used two different ways
7837  * The original usage is when creating a new array.
7838  * In this usage, raid_disks is > 0 and it together with
7839  *  level, size, not_persistent, layout, chunksize determine the
7840  *  shape of the array.
7841  *  This will always create an array with a type-0.90.0 superblock.
7842  * The newer usage is when assembling an array.
7843  *  In this case raid_disks will be 0, and the major_version field is
7844  *  used to determine which style super-blocks are to be found on the devices.
7845  *  The minor and patch _version numbers are also kept in case the
7846  *  super_block handler wishes to interpret them.
7847  */
7848 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
7849 {
7850 	if (info->raid_disks == 0) {
7851 		/* just setting version number for superblock loading */
7852 		if (info->major_version < 0 ||
7853 		    info->major_version >= ARRAY_SIZE(super_types) ||
7854 		    super_types[info->major_version].name == NULL) {
7855 			/* maybe try to auto-load a module? */
7856 			pr_warn("md: superblock version %d not known\n",
7857 				info->major_version);
7858 			return -EINVAL;
7859 		}
7860 		mddev->major_version = info->major_version;
7861 		mddev->minor_version = info->minor_version;
7862 		mddev->patch_version = info->patch_version;
7863 		mddev->persistent = !info->not_persistent;
7864 		/* ensure mddev_put doesn't delete this now that there
7865 		 * is some minimal configuration.
7866 		 */
7867 		mddev->ctime         = ktime_get_real_seconds();
7868 		return 0;
7869 	}
7870 	mddev->major_version = MD_MAJOR_VERSION;
7871 	mddev->minor_version = MD_MINOR_VERSION;
7872 	mddev->patch_version = MD_PATCHLEVEL_VERSION;
7873 	mddev->ctime         = ktime_get_real_seconds();
7874 
7875 	mddev->level         = info->level;
7876 	mddev->clevel[0]     = 0;
7877 	mddev->dev_sectors   = 2 * (sector_t)info->size;
7878 	mddev->raid_disks    = info->raid_disks;
7879 	/* don't set md_minor, it is determined by which /dev/md* was
7880 	 * opened
7881 	 */
7882 	if (info->state & (1<<MD_SB_CLEAN))
7883 		mddev->resync_offset = MaxSector;
7884 	else
7885 		mddev->resync_offset = 0;
7886 	mddev->persistent    = ! info->not_persistent;
7887 	mddev->external	     = 0;
7888 
7889 	mddev->layout        = info->layout;
7890 	if (mddev->level == 0)
7891 		/* Cannot trust RAID0 layout info here */
7892 		mddev->layout = -1;
7893 	mddev->chunk_sectors = info->chunk_size >> 9;
7894 
7895 	if (mddev->persistent) {
7896 		mddev->max_disks = MD_SB_DISKS;
7897 		mddev->flags = 0;
7898 		mddev->sb_flags = 0;
7899 	}
7900 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7901 
7902 	mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
7903 	mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
7904 	mddev->bitmap_info.offset = 0;
7905 
7906 	mddev->reshape_position = MaxSector;
7907 
7908 	/*
7909 	 * Generate a 128 bit UUID
7910 	 */
7911 	get_random_bytes(mddev->uuid, 16);
7912 
7913 	mddev->new_level = mddev->level;
7914 	mddev->new_chunk_sectors = mddev->chunk_sectors;
7915 	mddev->new_layout = mddev->layout;
7916 	mddev->delta_disks = 0;
7917 	mddev->reshape_backwards = 0;
7918 
7919 	return 0;
7920 }
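
/*
 * Illustrative SET_ARRAY_INFO payloads for the two usages described
 * above (field values are examples only). Assembling from existing
 * 0.90.0 superblocks leaves raid_disks at 0:
 *
 *	mdu_array_info_t info = { .major_version = 0, .minor_version = 90 };
 *
 * while creating a new array describes its shape, with .size in KiB
 * (dev_sectors is set to 2 * size above) and .chunk_size in bytes:
 *
 *	mdu_array_info_t info = {
 *		.raid_disks = 4, .level = 5,
 *		.size = 1048576, .chunk_size = 65536,
 *	};
 */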
7921 
7922 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
7923 {
7924 	lockdep_assert_held(&mddev->reconfig_mutex);
7925 
7926 	if (mddev->external_size)
7927 		return;
7928 
7929 	mddev->array_sectors = array_sectors;
7930 }
7931 EXPORT_SYMBOL(md_set_array_sectors);
7932 
7933 static int update_size(struct mddev *mddev, sector_t num_sectors)
7934 {
7935 	struct md_rdev *rdev;
7936 	int rv;
7937 	int fit = (num_sectors == 0);
7938 	sector_t old_dev_sectors = mddev->dev_sectors;
7939 
7940 	if (mddev->pers->resize == NULL)
7941 		return -EINVAL;
7942 	/* The "num_sectors" is the number of sectors of each device that
7943 	 * is used.  This can only make sense for arrays with redundancy.
7944 	 * linear and raid0 always use whatever space is available. We can only
7945 	 * consider changing this number if no resync or reconstruction is
7946 	 * happening, and if the new size is acceptable. It must fit before the
7947 	 * sb_start or, if that is <data_offset, it must fit before the size
7948 	 * of each device.  If num_sectors is zero, we find the largest size
7949 	 * that fits.
7950 	 */
7951 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
7952 		return -EBUSY;
7953 	if (!md_is_rdwr(mddev))
7954 		return -EROFS;
7955 
7956 	rdev_for_each(rdev, mddev) {
7957 		sector_t avail = rdev->sectors;
7958 
7959 		if (fit && (num_sectors == 0 || num_sectors > avail))
7960 			num_sectors = avail;
7961 		if (avail < num_sectors)
7962 			return -ENOSPC;
7963 	}
7964 	rv = mddev->pers->resize(mddev, num_sectors);
7965 	if (!rv) {
7966 		if (mddev_is_clustered(mddev))
7967 			mddev->cluster_ops->update_size(mddev, old_dev_sectors);
7968 		else if (!mddev_is_dm(mddev))
7969 			set_capacity_and_notify(mddev->gendisk,
7970 						mddev->array_sectors);
7971 	}
7972 	return rv;
7973 }
7974 
7975 static int update_raid_disks(struct mddev *mddev, int raid_disks)
7976 {
7977 	int rv;
7978 	struct md_rdev *rdev;
7979 	/* change the number of raid disks */
7980 	if (mddev->pers->check_reshape == NULL)
7981 		return -EINVAL;
7982 	if (!md_is_rdwr(mddev))
7983 		return -EROFS;
7984 	if (raid_disks <= 0 ||
7985 	    (mddev->max_disks && raid_disks >= mddev->max_disks))
7986 		return -EINVAL;
7987 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7988 	    test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
7989 	    mddev->reshape_position != MaxSector)
7990 		return -EBUSY;
7991 
7992 	rdev_for_each(rdev, mddev) {
7993 		if (mddev->raid_disks < raid_disks &&
7994 		    rdev->data_offset < rdev->new_data_offset)
7995 			return -EINVAL;
7996 		if (mddev->raid_disks > raid_disks &&
7997 		    rdev->data_offset > rdev->new_data_offset)
7998 			return -EINVAL;
7999 	}
8000 
8001 	mddev->delta_disks = raid_disks - mddev->raid_disks;
8002 	if (mddev->delta_disks < 0)
8003 		mddev->reshape_backwards = 1;
8004 	else if (mddev->delta_disks > 0)
8005 		mddev->reshape_backwards = 0;
8006 
8007 	rv = mddev->pers->check_reshape(mddev);
8008 	if (rv < 0) {
8009 		mddev->delta_disks = 0;
8010 		mddev->reshape_backwards = 0;
8011 	}
8012 	return rv;
8013 }
8014 
8015 static int get_cluster_ops(struct mddev *mddev)
8016 {
8017 	xa_lock(&md_submodule);
8018 	mddev->cluster_ops = xa_load(&md_submodule, ID_CLUSTER);
8019 	if (mddev->cluster_ops &&
8020 	    !try_module_get(mddev->cluster_ops->head.owner))
8021 		mddev->cluster_ops = NULL;
8022 	xa_unlock(&md_submodule);
8023 
8024 	return mddev->cluster_ops == NULL ? -ENOENT : 0;
8025 }
8026 
8027 static void put_cluster_ops(struct mddev *mddev)
8028 {
8029 	if (!mddev->cluster_ops)
8030 		return;
8031 
8032 	mddev->cluster_ops->leave(mddev);
8033 	module_put(mddev->cluster_ops->head.owner);
8034 	mddev->cluster_ops = NULL;
8035 }
8036 
8037 /*
8038  * update_array_info is used to change the configuration of an
8039  * on-line array.
8040  * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
8041  * fields in the info are checked against the array.
8042  * Any differences that cannot be handled will cause an error.
8043  * Normally, only one change can be managed at a time.
8044  */
8045 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
8046 {
8047 	int rv = 0;
8048 	int cnt = 0;
8049 	int state = 0;
8050 
8051 	/* calculate expected state, ignoring low bits */
8052 	if (mddev->bitmap && mddev->bitmap_info.offset)
8053 		state |= (1 << MD_SB_BITMAP_PRESENT);
8054 
8055 	if (mddev->major_version != info->major_version ||
8056 	    mddev->minor_version != info->minor_version ||
8057 /*	    mddev->patch_version != info->patch_version || */
8058 	    mddev->ctime         != info->ctime         ||
8059 	    mddev->level         != info->level         ||
8060 /*	    mddev->layout        != info->layout        || */
8061 	    mddev->persistent	 != !info->not_persistent ||
8062 	    mddev->chunk_sectors != info->chunk_size >> 9 ||
8063 	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
8064 	    ((state^info->state) & 0xfffffe00)
8065 		)
8066 		return -EINVAL;
8067 	/* Check there is only one change */
8068 	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
8069 		cnt++;
8070 	if (mddev->raid_disks != info->raid_disks)
8071 		cnt++;
8072 	if (mddev->layout != info->layout)
8073 		cnt++;
8074 	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
8075 		cnt++;
8076 	if (cnt == 0)
8077 		return 0;
8078 	if (cnt > 1)
8079 		return -EINVAL;
8080 
8081 	if (mddev->layout != info->layout) {
8082 		/* Change layout
8083 		 * we don't need to do anything at the md level, the
8084 		 * personality will take care of it all.
8085 		 */
8086 		if (mddev->pers->check_reshape == NULL)
8087 			return -EINVAL;
8088 		else {
8089 			mddev->new_layout = info->layout;
8090 			rv = mddev->pers->check_reshape(mddev);
8091 			if (rv)
8092 				mddev->new_layout = mddev->layout;
8093 			return rv;
8094 		}
8095 	}
8096 	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
8097 		rv = update_size(mddev, (sector_t)info->size * 2);
8098 
8099 	if (mddev->raid_disks    != info->raid_disks)
8100 		rv = update_raid_disks(mddev, info->raid_disks);
8101 
8102 	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
8103 		if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
8104 			rv = -EINVAL;
8105 			goto err;
8106 		}
8107 		if (mddev->recovery || mddev->sync_thread) {
8108 			rv = -EBUSY;
8109 			goto err;
8110 		}
8111 		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
8112 			/* add the bitmap */
8113 			if (mddev->bitmap) {
8114 				rv = -EEXIST;
8115 				goto err;
8116 			}
8117 			if (mddev->bitmap_info.default_offset == 0) {
8118 				rv = -EINVAL;
8119 				goto err;
8120 			}
8121 			mddev->bitmap_info.offset =
8122 				mddev->bitmap_info.default_offset;
8123 			mddev->bitmap_info.space =
8124 				mddev->bitmap_info.default_space;
8125 			rv = md_bitmap_create(mddev);
8126 			if (!rv)
8127 				rv = mddev->bitmap_ops->load(mddev);
8128 
8129 			if (rv)
8130 				md_bitmap_destroy(mddev);
8131 		} else {
8132 			struct md_bitmap_stats stats;
8133 
8134 			rv = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
8135 			if (rv)
8136 				goto err;
8137 
8138 			if (stats.file) {
8139 				rv = -EINVAL;
8140 				goto err;
8141 			}
8142 
8143 			if (mddev->bitmap_info.nodes) {
8144 				/* hold PW lock on all the bitmaps */
8145 				if (mddev->cluster_ops->lock_all_bitmaps(mddev) <= 0) {
8146 					pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
8147 					rv = -EPERM;
8148 					mddev->cluster_ops->unlock_all_bitmaps(mddev);
8149 					goto err;
8150 				}
8151 
8152 				mddev->bitmap_info.nodes = 0;
8153 				put_cluster_ops(mddev);
8154 				mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
8155 			}
8156 			md_bitmap_destroy(mddev);
8157 			mddev->bitmap_info.offset = 0;
8158 		}
8159 	}
8160 	md_update_sb(mddev, 1);
8161 	return rv;
8162 err:
8163 	return rv;
8164 }
8165 
8166 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
8167 {
8168 	struct md_rdev *rdev;
8169 	int err = 0;
8170 
8171 	if (mddev->pers == NULL)
8172 		return -ENODEV;
8173 
8174 	rcu_read_lock();
8175 	rdev = md_find_rdev_rcu(mddev, dev);
8176 	if (!rdev)
8177 		err =  -ENODEV;
8178 	else {
8179 		md_error(mddev, rdev);
8180 		if (test_bit(MD_BROKEN, &mddev->flags))
8181 			err = -EBUSY;
8182 	}
8183 	rcu_read_unlock();
8184 	return err;
8185 }
8186 
8187 /*
8188  * We have a problem here : there is no easy way to give a CHS
8189  * virtual geometry. We currently pretend that we have a 2 heads
8190  * 4 sectors (with a BIG number of cylinders...). This drives
8191  * dosfs just mad... ;-)
8192  */
8193 static int md_getgeo(struct gendisk *disk, struct hd_geometry *geo)
8194 {
8195 	struct mddev *mddev = disk->private_data;
8196 
8197 	geo->heads = 2;
8198 	geo->sectors = 4;
8199 	geo->cylinders = mddev->array_sectors / 8;
8200 	return 0;
8201 }
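
/*
 * Worked example of the fake geometry above: one "cylinder" is
 * 2 heads * 4 sectors = 8 sectors, so a 1 TiB array (2147483648
 * 512-byte sectors) reports 268435456 cylinders.
 */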
8202 
8203 static inline int md_ioctl_valid(unsigned int cmd)
8204 {
8205 	switch (cmd) {
8206 	case GET_ARRAY_INFO:
8207 	case GET_DISK_INFO:
8208 	case RAID_VERSION:
8209 		return 0;
8210 	case ADD_NEW_DISK:
8211 	case GET_BITMAP_FILE:
8212 	case HOT_ADD_DISK:
8213 	case HOT_REMOVE_DISK:
8214 	case RESTART_ARRAY_RW:
8215 	case RUN_ARRAY:
8216 	case SET_ARRAY_INFO:
8217 	case SET_BITMAP_FILE:
8218 	case SET_DISK_FAULTY:
8219 	case STOP_ARRAY:
8220 	case STOP_ARRAY_RO:
8221 	case CLUSTERED_DISK_NACK:
8222 		if (!capable(CAP_SYS_ADMIN))
8223 			return -EACCES;
8224 		return 0;
8225 	default:
8226 		return -ENOTTY;
8227 	}
8228 }
8229 
8230 static bool md_ioctl_need_suspend(unsigned int cmd)
8231 {
8232 	switch (cmd) {
8233 	case ADD_NEW_DISK:
8234 	case HOT_ADD_DISK:
8235 	case HOT_REMOVE_DISK:
8236 	case SET_BITMAP_FILE:
8237 	case SET_ARRAY_INFO:
8238 		return true;
8239 	default:
8240 		return false;
8241 	}
8242 }
8243 
8244 static int __md_set_array_info(struct mddev *mddev, void __user *argp)
8245 {
8246 	mdu_array_info_t info;
8247 	int err;
8248 
8249 	if (!argp)
8250 		memset(&info, 0, sizeof(info));
8251 	else if (copy_from_user(&info, argp, sizeof(info)))
8252 		return -EFAULT;
8253 
8254 	if (mddev->pers) {
8255 		err = update_array_info(mddev, &info);
8256 		if (err)
8257 			pr_warn("md: couldn't update array info. %d\n", err);
8258 		return err;
8259 	}
8260 
8261 	if (!list_empty(&mddev->disks)) {
8262 		pr_warn("md: array %s already has disks!\n", mdname(mddev));
8263 		return -EBUSY;
8264 	}
8265 
8266 	if (mddev->raid_disks) {
8267 		pr_warn("md: array %s already initialised!\n", mdname(mddev));
8268 		return -EBUSY;
8269 	}
8270 
8271 	err = md_set_array_info(mddev, &info);
8272 	if (err)
8273 		pr_warn("md: couldn't set array info. %d\n", err);
8274 
8275 	return err;
8276 }
8277 
8278 static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
8279 			unsigned int cmd, unsigned long arg)
8280 {
8281 	int err = 0;
8282 	void __user *argp = (void __user *)arg;
8283 	struct mddev *mddev = NULL;
8284 
8285 	err = md_ioctl_valid(cmd);
8286 	if (err)
8287 		return err;
8288 
8289 	/*
8290 	 * Commands dealing with the RAID driver but not any
8291 	 * particular array:
8292 	 */
8293 	if (cmd == RAID_VERSION)
8294 		return get_version(argp);
8295 
8296 	/*
8297 	 * Commands creating/starting a new array:
8298 	 */
8299 
8300 	mddev = bdev->bd_disk->private_data;
8301 
8302 	/* Some actions do not require the mutex */
8303 	switch (cmd) {
8304 	case GET_ARRAY_INFO:
8305 		if (!mddev->raid_disks && !mddev->external)
8306 			return -ENODEV;
8307 		return get_array_info(mddev, argp);
8308 
8309 	case GET_DISK_INFO:
8310 		if (!mddev->raid_disks && !mddev->external)
8311 			return -ENODEV;
8312 		return get_disk_info(mddev, argp);
8313 
8314 	case SET_DISK_FAULTY:
8315 		return set_disk_faulty(mddev, new_decode_dev(arg));
8316 
8317 	case GET_BITMAP_FILE:
8318 		return get_bitmap_file(mddev, argp);
8319 	}
8320 
8321 	if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
8322 		/* Need to flush page cache, and ensure no-one else opens
8323 		 * and writes
8324 		 */
8325 		err = mddev_set_closing_and_sync_blockdev(mddev, 1);
8326 		if (err)
8327 			return err;
8328 	}
8329 
8330 	if (!md_is_rdwr(mddev))
8331 		flush_work(&mddev->sync_work);
8332 
8333 	err = md_ioctl_need_suspend(cmd) ? mddev_suspend_and_lock(mddev) :
8334 					   mddev_lock(mddev);
8335 	if (err) {
8336 		pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
8337 			 err, cmd);
8338 		goto out;
8339 	}
8340 
8341 	if (cmd == SET_ARRAY_INFO) {
8342 		err = __md_set_array_info(mddev, argp);
8343 		goto unlock;
8344 	}
8345 
8346 	/*
8347 	 * Commands querying/configuring an existing array:
8348 	 */
8349 	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
8350 	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
8351 	if ((!mddev->raid_disks && !mddev->external)
8352 	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
8353 	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
8354 	    && cmd != GET_BITMAP_FILE) {
8355 		err = -ENODEV;
8356 		goto unlock;
8357 	}
8358 
8359 	/*
8360 	 * Commands even a read-only array can execute:
8361 	 */
8362 	switch (cmd) {
8363 	case RESTART_ARRAY_RW:
8364 		err = restart_array(mddev);
8365 		goto unlock;
8366 
8367 	case STOP_ARRAY:
8368 		err = do_md_stop(mddev, 0);
8369 		goto unlock;
8370 
8371 	case STOP_ARRAY_RO:
8372 		if (mddev->pers)
8373 			err = md_set_readonly(mddev);
8374 		goto unlock;
8375 
8376 	case HOT_REMOVE_DISK:
8377 		err = hot_remove_disk(mddev, new_decode_dev(arg));
8378 		goto unlock;
8379 
8380 	case ADD_NEW_DISK:
8381 		/* We can support ADD_NEW_DISK on read-only arrays
8382 		 * only if we are re-adding a preexisting device.
8383 		 * So require mddev->pers and MD_DISK_SYNC.
8384 		 */
8385 		if (mddev->pers) {
8386 			mdu_disk_info_t info;
8387 			if (copy_from_user(&info, argp, sizeof(info)))
8388 				err = -EFAULT;
8389 			else if (!(info.state & (1<<MD_DISK_SYNC)))
8390 				/* Need to clear read-only for this */
8391 				break;
8392 			else
8393 				err = md_add_new_disk(mddev, &info);
8394 			goto unlock;
8395 		}
8396 		break;
8397 	}
8398 
8399 	/*
8400 	 * The remaining ioctls are changing the state of the
8401 	 * superblock, so we do not allow them on read-only arrays.
8402 	 */
8403 	if (!md_is_rdwr(mddev) && mddev->pers) {
8404 		if (mddev->ro != MD_AUTO_READ) {
8405 			err = -EROFS;
8406 			goto unlock;
8407 		}
8408 		mddev->ro = MD_RDWR;
8409 		sysfs_notify_dirent_safe(mddev->sysfs_state);
8410 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8411 		/* mddev_unlock will wake thread */
8412 		/* If a device failed while we were read-only, we
8413 		 * need to make sure the metadata is updated now.
8414 		 */
8415 		if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
8416 			mddev_unlock(mddev);
8417 			wait_event(mddev->sb_wait,
8418 				   !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
8419 				   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
8420 			mddev_lock_nointr(mddev);
8421 		}
8422 	}
8423 
8424 	switch (cmd) {
8425 	case ADD_NEW_DISK:
8426 	{
8427 		mdu_disk_info_t info;
8428 		if (copy_from_user(&info, argp, sizeof(info)))
8429 			err = -EFAULT;
8430 		else
8431 			err = md_add_new_disk(mddev, &info);
8432 		goto unlock;
8433 	}
8434 
8435 	case CLUSTERED_DISK_NACK:
8436 		if (mddev_is_clustered(mddev))
8437 			mddev->cluster_ops->new_disk_ack(mddev, false);
8438 		else
8439 			err = -EINVAL;
8440 		goto unlock;
8441 
8442 	case HOT_ADD_DISK:
8443 		err = hot_add_disk(mddev, new_decode_dev(arg));
8444 		goto unlock;
8445 
8446 	case RUN_ARRAY:
8447 		err = do_md_run(mddev);
8448 		goto unlock;
8449 
8450 	case SET_BITMAP_FILE:
8451 		err = set_bitmap_file(mddev, (int)arg);
8452 		goto unlock;
8453 
8454 	default:
8455 		err = -EINVAL;
8456 		goto unlock;
8457 	}
8458 
8459 unlock:
8460 	if (mddev->hold_active == UNTIL_IOCTL &&
8461 	    err != -EINVAL)
8462 		mddev->hold_active = 0;
8463 
8464 	md_ioctl_need_suspend(cmd) ? mddev_unlock_and_resume(mddev) :
8465 				     mddev_unlock(mddev);
8466 
8467 out:
8468 	if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY))
8469 		clear_bit(MD_CLOSING, &mddev->flags);
8470 	return err;
8471 }
8472 #ifdef CONFIG_COMPAT
8473 static int md_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
8474 		    unsigned int cmd, unsigned long arg)
8475 {
8476 	switch (cmd) {
8477 	case HOT_REMOVE_DISK:
8478 	case HOT_ADD_DISK:
8479 	case SET_DISK_FAULTY:
8480 	case SET_BITMAP_FILE:
8481 		/* These take in integer arg, do not convert */
8482 		break;
8483 	default:
8484 		arg = (unsigned long)compat_ptr(arg);
8485 		break;
8486 	}
8487 
8488 	return md_ioctl(bdev, mode, cmd, arg);
8489 }
8490 #endif /* CONFIG_COMPAT */
8491 
8492 static int md_set_read_only(struct block_device *bdev, bool ro)
8493 {
8494 	struct mddev *mddev = bdev->bd_disk->private_data;
8495 	int err;
8496 
8497 	err = mddev_lock(mddev);
8498 	if (err)
8499 		return err;
8500 
8501 	if (!mddev->raid_disks && !mddev->external) {
8502 		err = -ENODEV;
8503 		goto out_unlock;
8504 	}
8505 
8506 	/*
8507 	 * Transitioning to read-auto need only happen for arrays that call
8508 	 * md_write_start and which are not ready for writes yet.
8509 	 */
8510 	if (!ro && mddev->ro == MD_RDONLY && mddev->pers) {
8511 		err = restart_array(mddev);
8512 		if (err)
8513 			goto out_unlock;
8514 		mddev->ro = MD_AUTO_READ;
8515 	}
8516 
8517 out_unlock:
8518 	mddev_unlock(mddev);
8519 	return err;
8520 }
8521 
8522 static int md_open(struct gendisk *disk, blk_mode_t mode)
8523 {
8524 	struct mddev *mddev;
8525 	int err;
8526 
8527 	spin_lock(&all_mddevs_lock);
8528 	mddev = mddev_get(disk->private_data);
8529 	spin_unlock(&all_mddevs_lock);
8530 	if (!mddev)
8531 		return -ENODEV;
8532 
8533 	err = mutex_lock_interruptible(&mddev->open_mutex);
8534 	if (err)
8535 		goto out;
8536 
8537 	err = -ENODEV;
8538 	if (test_bit(MD_CLOSING, &mddev->flags))
8539 		goto out_unlock;
8540 
8541 	atomic_inc(&mddev->openers);
8542 	mutex_unlock(&mddev->open_mutex);
8543 
8544 	disk_check_media_change(disk);
8545 	return 0;
8546 
8547 out_unlock:
8548 	mutex_unlock(&mddev->open_mutex);
8549 out:
8550 	mddev_put(mddev);
8551 	return err;
8552 }
8553 
8554 static void md_release(struct gendisk *disk)
8555 {
8556 	struct mddev *mddev = disk->private_data;
8557 
8558 	BUG_ON(!mddev);
8559 	atomic_dec(&mddev->openers);
8560 	mddev_put(mddev);
8561 }
8562 
8563 static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
8564 {
8565 	struct mddev *mddev = disk->private_data;
8566 	unsigned int ret = 0;
8567 
8568 	if (mddev->changed)
8569 		ret = DISK_EVENT_MEDIA_CHANGE;
8570 	mddev->changed = 0;
8571 	return ret;
8572 }
8573 
8574 static void md_free_disk(struct gendisk *disk)
8575 {
8576 	struct mddev *mddev = disk->private_data;
8577 
8578 	mddev_free(mddev);
8579 }
8580 
8581 const struct block_device_operations md_fops =
8582 {
8583 	.owner		= THIS_MODULE,
8584 	.submit_bio	= md_submit_bio,
8585 	.open		= md_open,
8586 	.release	= md_release,
8587 	.ioctl		= md_ioctl,
8588 #ifdef CONFIG_COMPAT
8589 	.compat_ioctl	= md_compat_ioctl,
8590 #endif
8591 	.getgeo		= md_getgeo,
8592 	.check_events	= md_check_events,
8593 	.set_read_only	= md_set_read_only,
8594 	.free_disk	= md_free_disk,
8595 };
8596 
8597 static int md_thread(void *arg)
8598 {
8599 	struct md_thread *thread = arg;
8600 
8601 	/*
8602 	 * md_thread is a 'system-thread', its priority should be very
8603 	 * high. We avoid resource deadlocks individually in each
8604 	 * raid personality. (RAID5 does preallocation) We also use RR and
8605 	 * the very same RT priority as kswapd, thus we will never get
8606 	 * into a priority inversion deadlock.
8607 	 *
8608 	 * we definitely have to have equal or higher priority than
8609 	 * bdflush, otherwise bdflush will deadlock if there are too
8610 	 * many dirty RAID5 blocks.
8611 	 */
8612 
8613 	allow_signal(SIGKILL);
8614 	while (!kthread_should_stop()) {
8615 
8616 		/* We need to wait INTERRUPTIBLE so that
8617 		 * we don't add to the load-average.
8618 		 * That means we need to be sure no signals are
8619 		 * pending
8620 		 */
8621 		if (signal_pending(current))
8622 			flush_signals(current);
8623 
8624 		wait_event_interruptible_timeout
8625 			(thread->wqueue,
8626 			 test_bit(THREAD_WAKEUP, &thread->flags)
8627 			 || kthread_should_stop() || kthread_should_park(),
8628 			 thread->timeout);
8629 
8630 		clear_bit(THREAD_WAKEUP, &thread->flags);
8631 		if (kthread_should_park())
8632 			kthread_parkme();
8633 		if (!kthread_should_stop())
8634 			thread->run(thread);
8635 	}
8636 
8637 	return 0;
8638 }
8639 
8640 static void md_wakeup_thread_directly(struct md_thread __rcu **thread)
8641 {
8642 	struct md_thread *t;
8643 
8644 	rcu_read_lock();
8645 	t = rcu_dereference(*thread);
8646 	if (t)
8647 		wake_up_process(t->tsk);
8648 	rcu_read_unlock();
8649 }
8650 
8651 void __md_wakeup_thread(struct md_thread __rcu *thread)
8652 {
8653 	struct md_thread *t;
8654 
8655 	t = rcu_dereference(thread);
8656 	if (t) {
8657 		pr_debug("md: waking up MD thread %s.\n", t->tsk->comm);
8658 		set_bit(THREAD_WAKEUP, &t->flags);
8659 		if (wq_has_sleeper(&t->wqueue))
8660 			wake_up(&t->wqueue);
8661 	}
8662 }
8663 EXPORT_SYMBOL(__md_wakeup_thread);
8664 
8665 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
8666 		struct mddev *mddev, const char *name)
8667 {
8668 	struct md_thread *thread;
8669 
8670 	thread = kzalloc_obj(*thread, GFP_KERNEL);
8671 	if (!thread)
8672 		return NULL;
8673 
8674 	init_waitqueue_head(&thread->wqueue);
8675 
8676 	thread->run = run;
8677 	thread->mddev = mddev;
8678 	thread->timeout = MAX_SCHEDULE_TIMEOUT;
8679 	thread->tsk = kthread_run(md_thread, thread,
8680 				  "%s_%s",
8681 				  mdname(thread->mddev),
8682 				  name);
8683 	if (IS_ERR(thread->tsk)) {
8684 		kfree(thread);
8685 		return NULL;
8686 	}
8687 	return thread;
8688 }
8689 EXPORT_SYMBOL(md_register_thread);
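
/*
 * Typical use by a personality (names are illustrative): the ->run()
 * method registers its main service loop once,
 *
 *	struct md_thread *t = md_register_thread(myraid_daemon, mddev, "myraid");
 *	if (!t)
 *		return -ENOMEM;
 *
 * wakes it with md_wakeup_thread(), and tears it down again through
 * md_unregister_thread() below.
 */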
8690 
8691 void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp)
8692 {
8693 	struct md_thread *thread = rcu_dereference_protected(*threadp,
8694 					lockdep_is_held(&mddev->reconfig_mutex));
8695 
8696 	if (!thread)
8697 		return;
8698 
8699 	rcu_assign_pointer(*threadp, NULL);
8700 	synchronize_rcu();
8701 
8702 	pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
8703 	kthread_stop(thread->tsk);
8704 	kfree(thread);
8705 }
8706 EXPORT_SYMBOL(md_unregister_thread);
8707 
8708 void md_error(struct mddev *mddev, struct md_rdev *rdev)
8709 {
8710 	if (!rdev || test_bit(Faulty, &rdev->flags))
8711 		return;
8712 
8713 	if (!mddev->pers || !mddev->pers->error_handler)
8714 		return;
8715 	mddev->pers->error_handler(mddev, rdev);
8716 
8717 	if (mddev->pers->head.id == ID_RAID0 ||
8718 	    mddev->pers->head.id == ID_LINEAR)
8719 		return;
8720 
8721 	if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
8722 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8723 	sysfs_notify_dirent_safe(rdev->sysfs_state);
8724 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8725 	if (!test_bit(MD_BROKEN, &mddev->flags)) {
8726 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8727 		md_wakeup_thread(mddev->thread);
8728 	}
8729 	if (mddev->event_work.func)
8730 		queue_work(md_misc_wq, &mddev->event_work);
8731 	md_new_event();
8732 }
8733 EXPORT_SYMBOL(md_error);
8734 
8735 /* seq_file implementation /proc/mdstat */
8736 
8737 static void status_unused(struct seq_file *seq)
8738 {
8739 	int i = 0;
8740 	struct md_rdev *rdev;
8741 
8742 	seq_printf(seq, "unused devices: ");
8743 
8744 	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
8745 		i++;
8746 		seq_printf(seq, "%pg ", rdev->bdev);
8747 	}
8748 	if (!i)
8749 		seq_printf(seq, "<none>");
8750 
8751 	seq_printf(seq, "\n");
8752 }
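
/*
 * status_unused() emits the trailing line of /proc/mdstat, e.g.:
 *
 *	unused devices: <none>
 */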
8753 
8754 static void status_personalities(struct seq_file *seq)
8755 {
8756 	struct md_submodule_head *head;
8757 	unsigned long i;
8758 
8759 	seq_puts(seq, "Personalities : ");
8760 
8761 	xa_lock(&md_submodule);
8762 	xa_for_each(&md_submodule, i, head)
8763 		if (head->type == MD_PERSONALITY)
8764 			seq_printf(seq, "[%s] ", head->name);
8765 	xa_unlock(&md_submodule);
8766 
8767 	seq_puts(seq, "\n");
8768 }
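
/*
 * status_personalities() emits the first line of /proc/mdstat, e.g.:
 *
 *	Personalities : [raid1] [raid10]
 */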
8769 
8770 static int status_resync(struct seq_file *seq, struct mddev *mddev)
8771 {
8772 	sector_t max_sectors, resync, res;
8773 	unsigned long dt, db = 0;
8774 	sector_t rt, curr_mark_cnt, resync_mark_cnt;
8775 	int scale, recovery_active;
8776 	unsigned int per_milli;
8777 
8778 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8779 	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8780 		max_sectors = mddev->resync_max_sectors;
8781 	else
8782 		max_sectors = mddev->dev_sectors;
8783 
8784 	resync = mddev->curr_resync;
8785 	if (resync < MD_RESYNC_ACTIVE) {
8786 		if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
8787 			/* Still cleaning up */
8788 			resync = max_sectors;
8789 	} else if (resync > max_sectors) {
8790 		resync = max_sectors;
8791 	} else {
8792 		res = atomic_read(&mddev->recovery_active);
8793 		/*
8794 		 * Resync has started, but the subtraction has overflowed or
8795 		 * yielded one of the special values. Force it to active to
8796 		 * ensure the status reports an active resync.
8797 		 */
8798 		if (resync < res || resync - res < MD_RESYNC_ACTIVE)
8799 			resync = MD_RESYNC_ACTIVE;
8800 		else
8801 			resync -= res;
8802 	}
8803 
8804 	if (resync == MD_RESYNC_NONE) {
8805 		if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
8806 			struct md_rdev *rdev;
8807 
8808 			rdev_for_each(rdev, mddev)
8809 				if (rdev->raid_disk >= 0 &&
8810 				    !test_bit(Faulty, &rdev->flags) &&
8811 				    rdev->recovery_offset != MaxSector &&
8812 				    rdev->recovery_offset) {
8813 					seq_printf(seq, "\trecover=REMOTE");
8814 					return 1;
8815 				}
8816 			if (mddev->reshape_position != MaxSector)
8817 				seq_printf(seq, "\treshape=REMOTE");
8818 			else
8819 				seq_printf(seq, "\tresync=REMOTE");
8820 			return 1;
8821 		}
8822 		if (mddev->resync_offset < MaxSector) {
8823 			seq_printf(seq, "\tresync=PENDING");
8824 			return 1;
8825 		}
8826 		return 0;
8827 	}
8828 	if (resync < MD_RESYNC_ACTIVE) {
8829 		seq_printf(seq, "\tresync=DELAYED");
8830 		return 1;
8831 	}
8832 
8833 	WARN_ON(max_sectors == 0);
8834 	/* Pick 'scale' such that (resync>>scale)*1000 will fit
8835 	 * in a sector_t, and (max_sectors>>scale) will fit in a
8836 	 * u32, as those are the requirements for sector_div.
8837 	 * Thus 'scale' must be at least 10
8838 	 */
8839 	scale = 10;
8840 	if (sizeof(sector_t) > sizeof(unsigned long)) {
8841 		while ( max_sectors/2 > (1ULL<<(scale+32)))
8842 			scale++;
8843 	}
8844 	res = (resync>>scale)*1000;
8845 	sector_div(res, (u32)((max_sectors>>scale)+1));
8846 
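	/*
	 * Worked example (illustrative values only): resync = 2,000,000
	 * sectors done out of max_sectors = 4,000,000 keeps scale at 10 and
	 * yields res ~= 499, i.e. "49.9%".  The block below then renders a
	 * 20-step progress bar, one '=' per completed 5% (50 per-mille),
	 * e.g. "[=========>...........]".
	 */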
8847 	per_milli = res;
8848 	{
8849 		int i, x = per_milli/50, y = 20-x;
8850 		seq_printf(seq, "[");
8851 		for (i = 0; i < x; i++)
8852 			seq_printf(seq, "=");
8853 		seq_printf(seq, ">");
8854 		for (i = 0; i < y; i++)
8855 			seq_printf(seq, ".");
8856 		seq_printf(seq, "] ");
8857 	}
8858 	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
8859 		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
8860 		    "reshape" :
8861 		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
8862 		     "check" :
8863 		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
8864 		      "resync" : "recovery"))),
8865 		   per_milli/10, per_milli % 10,
8866 		   (unsigned long long) resync/2,
8867 		   (unsigned long long) max_sectors/2);
8868 
8869 	/*
8870 	 * dt: time from mark until now
8871 	 * db: blocks written from mark until now
8872 	 * rt: remaining time
8873 	 *
8874 	 * rt is a sector_t, which is always 64bit now. We are keeping
8875 	 * the original algorithm, but it is not really necessary.
8876 	 *
8877 	 * Original algorithm:
8878 	 *   So we divide before multiply in case it is 32bit and close
8879 	 *   to the limit.
8880 	 *   We scale the divisor (db) by 32 to avoid losing precision
8881 	 *   near the end of resync when the number of remaining sectors
8882 	 *   is close to 'db'.
8883 	 *   We then divide rt by 32 after multiplying by db to compensate.
8884 	 *   The '+1' avoids division by zero if db is very small.
8885 	 */
8886 	dt = ((jiffies - mddev->resync_mark) / HZ);
8887 	if (!dt) dt++;
8888 
8889 	curr_mark_cnt = mddev->curr_mark_cnt;
8890 	recovery_active = atomic_read(&mddev->recovery_active);
8891 	resync_mark_cnt = mddev->resync_mark_cnt;
8892 
8893 	if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
8894 		db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
8895 
8896 	rt = max_sectors - resync;    /* number of remaining sectors */
8897 	rt = div64_u64(rt, db/32+1);
8898 	rt *= dt;
8899 	rt >>= 5;
8900 
8901 	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
8902 		   ((unsigned long)rt % 60)/6);
8903 
8904 	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
8905 	return 1;
8906 }
8907 
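/*
 * Example of the progress line assembled above (illustrative values only):
 *
 *   [=========>...........]  resync = 49.9% (999424/2000000)
 *   finish=12.3min speed=10240K/sec
 */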
8908 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
8909 	__acquires(&all_mddevs_lock)
8910 {
8911 	seq->poll_event = atomic_read(&md_event_count);
8912 	spin_lock(&all_mddevs_lock);
8913 
8914 	return seq_list_start_head(&all_mddevs, *pos);
8915 }
8916 
8917 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8918 {
8919 	return seq_list_next(v, &all_mddevs, pos);
8920 }
8921 
8922 static void md_seq_stop(struct seq_file *seq, void *v)
8923 	__releases(&all_mddevs_lock)
8924 {
8925 	spin_unlock(&all_mddevs_lock);
8926 }
8927 
8928 static void md_bitmap_status(struct seq_file *seq, struct mddev *mddev)
8929 {
8930 	struct md_bitmap_stats stats;
8931 	unsigned long used_pages;
8932 	unsigned long chunk_kb;
8933 	int err;
8934 
8935 	if (!md_bitmap_enabled(mddev, false))
8936 		return;
8937 
8938 	err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
8939 	if (err)
8940 		return;
8941 
8942 	chunk_kb = mddev->bitmap_info.chunksize >> 10;
8943 	used_pages = stats.pages - stats.missing_pages;
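	/* chunk_kb == 0 means the chunk is smaller than 1KB; print bytes */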
8944 
8945 	seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], %lu%s chunk",
8946 		   used_pages, stats.pages, used_pages << (PAGE_SHIFT - 10),
8947 		   chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
8948 		   chunk_kb ? "KB" : "B");
8949 
8950 	if (stats.file) {
8951 		seq_puts(seq, ", file: ");
8952 		seq_file_path(seq, stats.file, " \t\n");
8953 	}
8954 
8955 	seq_putc(seq, '\n');
8956 }
8957 
8958 static int md_seq_show(struct seq_file *seq, void *v)
8959 {
8960 	struct mddev *mddev;
8961 	sector_t sectors;
8962 	struct md_rdev *rdev;
8963 
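	/* seq_list_start_head() yields the list head first: emit the header */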
8964 	if (v == &all_mddevs) {
8965 		status_personalities(seq);
8966 		if (list_empty(&all_mddevs))
8967 			status_unused(seq);
8968 		return 0;
8969 	}
8970 
8971 	mddev = list_entry(v, struct mddev, all_mddevs);
8972 	if (!mddev_get(mddev))
8973 		return 0;
8974 
8975 	spin_unlock(&all_mddevs_lock);
8976 
8977 	/* prevent the bitmap from being freed while we inspect it */
8978 	mutex_lock(&mddev->bitmap_info.mutex);
8979 
8980 	spin_lock(&mddev->lock);
8981 	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
8982 		seq_printf(seq, "%s : ", mdname(mddev));
8983 		if (mddev->pers) {
8984 			if (test_bit(MD_BROKEN, &mddev->flags))
8985 				seq_printf(seq, "broken");
8986 			else
8987 				seq_printf(seq, "active");
8988 			if (mddev->ro == MD_RDONLY)
8989 				seq_printf(seq, " (read-only)");
8990 			if (mddev->ro == MD_AUTO_READ)
8991 				seq_printf(seq, " (auto-read-only)");
8992 			seq_printf(seq, " %s", mddev->pers->head.name);
8993 		} else {
8994 			seq_printf(seq, "inactive");
8995 		}
8996 
8997 		sectors = 0;
8998 		rcu_read_lock();
8999 		rdev_for_each_rcu(rdev, mddev) {
9000 			seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr);
9001 
9002 			if (test_bit(WriteMostly, &rdev->flags))
9003 				seq_printf(seq, "(W)");
9004 			if (test_bit(Journal, &rdev->flags))
9005 				seq_printf(seq, "(J)");
9006 			if (test_bit(Faulty, &rdev->flags)) {
9007 				seq_printf(seq, "(F)");
9008 				continue;
9009 			}
9010 			if (rdev->raid_disk < 0)
9011 				seq_printf(seq, "(S)"); /* spare */
9012 			if (test_bit(Replacement, &rdev->flags))
9013 				seq_printf(seq, "(R)");
9014 			sectors += rdev->sectors;
9015 		}
9016 		rcu_read_unlock();
9017 
9018 		if (!list_empty(&mddev->disks)) {
9019 			if (mddev->pers)
9020 				seq_printf(seq, "\n      %llu blocks",
9021 					   (unsigned long long)
9022 					   mddev->array_sectors / 2);
9023 			else
9024 				seq_printf(seq, "\n      %llu blocks",
9025 					   (unsigned long long)sectors / 2);
9026 		}
9027 		if (mddev->persistent) {
9028 			if (mddev->major_version != 0 ||
9029 			    mddev->minor_version != 90) {
9030 				seq_printf(seq," super %d.%d",
9031 					   mddev->major_version,
9032 					   mddev->minor_version);
9033 			}
9034 		} else if (mddev->external)
9035 			seq_printf(seq, " super external:%s",
9036 				   mddev->metadata_type);
9037 		else
9038 			seq_printf(seq, " super non-persistent");
9039 
9040 		if (mddev->pers) {
9041 			mddev->pers->status(seq, mddev);
9042 			seq_printf(seq, "\n      ");
9043 			if (mddev->pers->sync_request) {
9044 				if (status_resync(seq, mddev))
9045 					seq_printf(seq, "\n      ");
9046 			}
9047 		} else
9048 			seq_printf(seq, "\n       ");
9049 
9050 		md_bitmap_status(seq, mddev);
9051 
9052 		seq_printf(seq, "\n");
9053 	}
9054 	spin_unlock(&mddev->lock);
9055 	mutex_unlock(&mddev->bitmap_info.mutex);
9056 	spin_lock(&all_mddevs_lock);
9057 
9058 	if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs))
9059 		status_unused(seq);
9060 
9061 	mddev_put_locked(mddev);
9062 	return 0;
9063 }
9064 
9065 static const struct seq_operations md_seq_ops = {
9066 	.start  = md_seq_start,
9067 	.next   = md_seq_next,
9068 	.stop   = md_seq_stop,
9069 	.show   = md_seq_show,
9070 };
9071 
9072 static int md_seq_open(struct inode *inode, struct file *file)
9073 {
9074 	struct seq_file *seq;
9075 	int error;
9076 
9077 	error = seq_open(file, &md_seq_ops);
9078 	if (error)
9079 		return error;
9080 
9081 	seq = file->private_data;
9082 	seq->poll_event = atomic_read(&md_event_count);
9083 	return error;
9084 }
9085 
9086 static int md_unloading;
9087 static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
9088 {
9089 	struct seq_file *seq = filp->private_data;
9090 	__poll_t mask;
9091 
9092 	if (md_unloading)
9093 		return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
9094 	poll_wait(filp, &md_event_waiters, wait);
9095 
9096 	/* always allow read */
9097 	mask = EPOLLIN | EPOLLRDNORM;
9098 
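	/* report an exceptional condition when new md events have occurred */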
9099 	if (seq->poll_event != atomic_read(&md_event_count))
9100 		mask |= EPOLLERR | EPOLLPRI;
9101 	return mask;
9102 }
9103 
9104 static const struct proc_ops mdstat_proc_ops = {
9105 	.proc_open	= md_seq_open,
9106 	.proc_read	= seq_read,
9107 	.proc_lseek	= seq_lseek,
9108 	.proc_release	= seq_release,
9109 	.proc_poll	= mdstat_poll,
9110 };
9111 
9112 int register_md_submodule(struct md_submodule_head *msh)
9113 {
9114 	return xa_insert(&md_submodule, msh->id, msh, GFP_KERNEL);
9115 }
9116 EXPORT_SYMBOL_GPL(register_md_submodule);
9117 
9118 void unregister_md_submodule(struct md_submodule_head *msh)
9119 {
9120 	xa_erase(&md_submodule, msh->id);
9121 }
9122 EXPORT_SYMBOL_GPL(unregister_md_submodule);
9123 
9124 int md_setup_cluster(struct mddev *mddev, int nodes)
9125 {
9126 	int ret = get_cluster_ops(mddev);
9127 
9128 	if (ret) {
9129 		request_module("md-cluster");
9130 		ret = get_cluster_ops(mddev);
9131 	}
9132 
9133 	/* ensure module won't be unloaded */
9134 	if (ret) {
9135 		pr_warn("can't find md-cluster module or get its reference.\n");
9136 		return ret;
9137 	}
9138 
9139 	ret = mddev->cluster_ops->join(mddev, nodes);
9140 	if (!ret)
9141 		mddev->safemode_delay = 0;
9142 	return ret;
9143 }
9144 
9145 void md_cluster_stop(struct mddev *mddev)
9146 {
9147 	put_cluster_ops(mddev);
9148 }
9149 
9150 static bool is_rdev_holder_idle(struct md_rdev *rdev, bool init)
9151 {
9152 	unsigned long last_events = rdev->last_events;
9153 
9154 	if (!bdev_is_partition(rdev->bdev))
9155 		return true;
9156 
9157 	/*
9158 	 * If rdev is a partition and the user doesn't issue IO to the array,
9159 	 * the array is still not idle if the user issues IO to other partitions.
9160 	 */
9161 	rdev->last_events = part_stat_read_accum(rdev->bdev->bd_disk->part0,
9162 						 sectors) -
9163 			    part_stat_read_accum(rdev->bdev, sectors);
9164 
9165 	return init || rdev->last_events <= last_events;
9166 }
9167 
9168 /*
9169  * mddev is idle if all of the following conditions hold since the last check:
9170  * 1) mddev has no normal IO completed;
9171  * 2) mddev has no inflight normal IO;
9172  * 3) if any member disk is a partition, the other partitions on that disk
9173  *    have no IO completed.
9174  *
9175  * Note that this check relies on IO accounting being enabled.
9176  */
9177 static bool is_mddev_idle(struct mddev *mddev, int init)
9178 {
9179 	unsigned long last_events = mddev->normal_io_events;
9180 	struct gendisk *disk;
9181 	struct md_rdev *rdev;
9182 	bool idle = true;
9183 
9184 	disk = mddev_is_dm(mddev) ? mddev->dm_gendisk : mddev->gendisk;
9185 	if (!disk)
9186 		return true;
9187 
9188 	mddev->normal_io_events = part_stat_read_accum(disk->part0, sectors);
9189 	if (!init && (mddev->normal_io_events > last_events ||
9190 		      bdev_count_inflight(disk->part0)))
9191 		idle = false;
9192 
9193 	rcu_read_lock();
9194 	rdev_for_each_rcu(rdev, mddev)
9195 		if (!is_rdev_holder_idle(rdev, init))
9196 			idle = false;
9197 	rcu_read_unlock();
9198 
9199 	return idle;
9200 }
9201 
9202 void md_done_sync(struct mddev *mddev, int blocks)
9203 {
9204 	/* another "blocks" (512-byte) blocks have been synced */
9205 	atomic_sub(blocks, &mddev->recovery_active);
9206 	wake_up(&mddev->recovery_wait);
9207 }
9208 EXPORT_SYMBOL(md_done_sync);
9209 
9210 void md_sync_error(struct mddev *mddev)
9211 {
9212 	/* stop recovery, signal do_sync */
9213 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9214 	md_wakeup_thread(mddev->thread);
9215 }
9216 EXPORT_SYMBOL(md_sync_error);
9217 
9218 /* md_write_start(mddev, bi)
9219  * If we need to update some array metadata (e.g. 'active' flag
9220  * in superblock) before writing, schedule a superblock update
9221  * and wait for it to complete.
9222  * A return value of 'false' means that the write wasn't recorded
9223  * and cannot proceed because the array is being suspended.
9224  */
9225 void md_write_start(struct mddev *mddev, struct bio *bi)
9226 {
9227 	int did_change = 0;
9228 
9229 	if (bio_data_dir(bi) != WRITE)
9230 		return;
9231 
9232 	BUG_ON(mddev->ro == MD_RDONLY);
9233 	if (mddev->ro == MD_AUTO_READ) {
9234 		/* need to switch to read/write */
9235 		mddev->ro = MD_RDWR;
9236 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9237 		md_wakeup_thread(mddev->thread);
9238 		md_wakeup_thread(mddev->sync_thread);
9239 		did_change = 1;
9240 	}
9241 	rcu_read_lock();
9242 	percpu_ref_get(&mddev->writes_pending);
9243 	smp_mb(); /* Match smp_mb in set_in_sync() */
9244 	if (mddev->safemode == 1)
9245 		mddev->safemode = 0;
9246 	/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
9247 	if (mddev->in_sync || mddev->sync_checkers) {
9248 		spin_lock(&mddev->lock);
9249 		if (mddev->in_sync) {
9250 			mddev->in_sync = 0;
9251 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
9252 			set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
9253 			md_wakeup_thread(mddev->thread);
9254 			did_change = 1;
9255 		}
9256 		spin_unlock(&mddev->lock);
9257 	}
9258 	rcu_read_unlock();
9259 	if (did_change)
9260 		sysfs_notify_dirent_safe(mddev->sysfs_state);
9261 	if (!test_bit(MD_HAS_SUPERBLOCK, &mddev->flags))
9262 		return;
9263 	wait_event(mddev->sb_wait,
9264 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
9265 }
9266 EXPORT_SYMBOL(md_write_start);
9267 
9268 /* md_write_inc can only be called when md_write_start() has
9269  * already been called at least once for the current request.
9270  * It increments the counter and is useful when a single request
9271  * is split into several parts.  Each part causes an increment and
9272  * so needs a matching md_write_end().
9273  * Unlike md_write_start(), it is safe to call md_write_inc() inside
9274  * a spinlocked region.
9275  */
9276 void md_write_inc(struct mddev *mddev, struct bio *bi)
9277 {
9278 	if (bio_data_dir(bi) != WRITE)
9279 		return;
9280 	WARN_ON_ONCE(mddev->in_sync || !md_is_rdwr(mddev));
9281 	percpu_ref_get(&mddev->writes_pending);
9282 }
9283 EXPORT_SYMBOL(md_write_inc);
9284 
9285 void md_write_end(struct mddev *mddev)
9286 {
9287 	percpu_ref_put(&mddev->writes_pending);
9288 
9289 	if (mddev->safemode == 2)
9290 		md_wakeup_thread(mddev->thread);
9291 	else if (mddev->safemode_delay)
9292 		/* The roundup() ensures this only performs locking once
9293 		 * every ->safemode_delay jiffies
9294 		 */
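		/*
		 * Illustrative numbers (assumed): with safemode_delay = 200
		 * jiffies, every write completing in the window (1000, 1200]
		 * computes the same expiry of 1400, so the timer is only
		 * re-armed when the expiry actually changes.
		 */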
9295 		mod_timer(&mddev->safemode_timer,
9296 			  roundup(jiffies, mddev->safemode_delay) +
9297 			  mddev->safemode_delay);
9298 }
9300 EXPORT_SYMBOL(md_write_end);
9301 
9302 /* This is used by raid0 and raid10 */
9303 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
9304 			struct bio *bio, sector_t start, sector_t size)
9305 {
9306 	struct bio *discard_bio = NULL;
9307 
9308 	__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, &discard_bio);
9309 	if (!discard_bio)
9310 		return;
9311 
9312 	bio_chain(discard_bio, bio);
9313 	bio_clone_blkg_association(discard_bio, bio);
9314 	mddev_trace_remap(mddev, discard_bio, bio->bi_iter.bi_sector);
9315 	submit_bio_noacct(discard_bio);
9316 }
9317 EXPORT_SYMBOL_GPL(md_submit_discard_bio);
9318 
9319 static void md_bitmap_start(struct mddev *mddev,
9320 			    struct md_io_clone *md_io_clone)
9321 {
9322 	md_bitmap_fn *fn = unlikely(md_io_clone->rw == STAT_DISCARD) ?
9323 			   mddev->bitmap_ops->start_discard :
9324 			   mddev->bitmap_ops->start_write;
9325 
9326 	if (mddev->pers->bitmap_sector)
9327 		mddev->pers->bitmap_sector(mddev, &md_io_clone->offset,
9328 					   &md_io_clone->sectors);
9329 
9330 	fn(mddev, md_io_clone->offset, md_io_clone->sectors);
9331 }
9332 
9333 static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone)
9334 {
9335 	md_bitmap_fn *fn = unlikely(md_io_clone->rw == STAT_DISCARD) ?
9336 			   mddev->bitmap_ops->end_discard :
9337 			   mddev->bitmap_ops->end_write;
9338 
9339 	fn(mddev, md_io_clone->offset, md_io_clone->sectors);
9340 }
9341 
9342 static void md_end_clone_io(struct bio *bio)
9343 {
9344 	struct md_io_clone *md_io_clone = bio->bi_private;
9345 	struct bio *orig_bio = md_io_clone->orig_bio;
9346 	struct mddev *mddev = md_io_clone->mddev;
9347 
9348 	if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false))
9349 		md_bitmap_end(mddev, md_io_clone);
9350 
9351 	if (bio->bi_status && !orig_bio->bi_status)
9352 		orig_bio->bi_status = bio->bi_status;
9353 
9354 	if (md_io_clone->start_time)
9355 		bio_end_io_acct(orig_bio, md_io_clone->start_time);
9356 
9357 	bio_put(bio);
9358 	bio_endio(orig_bio);
9359 	percpu_ref_put(&mddev->active_io);
9360 }
9361 
9362 static void md_clone_bio(struct mddev *mddev, struct bio **bio)
9363 {
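	/*
	 * The clone is allocated from mddev->io_clone_set, a bioset whose
	 * front padding embeds struct md_io_clone, so container_of() below
	 * recovers the per-IO bookkeeping (original bio, accounting start
	 * time, bitmap range) from the cloned bio.
	 */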
9364 	struct block_device *bdev = (*bio)->bi_bdev;
9365 	struct md_io_clone *md_io_clone;
9366 	struct bio *clone =
9367 		bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_clone_set);
9368 
9369 	md_io_clone = container_of(clone, struct md_io_clone, bio_clone);
9370 	md_io_clone->orig_bio = *bio;
9371 	md_io_clone->mddev = mddev;
9372 	if (blk_queue_io_stat(bdev->bd_disk->queue))
9373 		md_io_clone->start_time = bio_start_io_acct(*bio);
9374 
9375 	if (bio_data_dir(*bio) == WRITE && md_bitmap_enabled(mddev, false)) {
9376 		md_io_clone->offset = (*bio)->bi_iter.bi_sector;
9377 		md_io_clone->sectors = bio_sectors(*bio);
9378 		md_io_clone->rw = op_stat_group(bio_op(*bio));
9379 		md_bitmap_start(mddev, md_io_clone);
9380 	}
9381 
9382 	clone->bi_end_io = md_end_clone_io;
9383 	clone->bi_private = md_io_clone;
9384 	*bio = clone;
9385 }
9386 
9387 void md_account_bio(struct mddev *mddev, struct bio **bio)
9388 {
9389 	percpu_ref_get(&mddev->active_io);
9390 	md_clone_bio(mddev, bio);
9391 }
9392 EXPORT_SYMBOL_GPL(md_account_bio);
9393 
9394 void md_free_cloned_bio(struct bio *bio)
9395 {
9396 	struct md_io_clone *md_io_clone = bio->bi_private;
9397 	struct bio *orig_bio = md_io_clone->orig_bio;
9398 	struct mddev *mddev = md_io_clone->mddev;
9399 
9400 	if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false))
9401 		md_bitmap_end(mddev, md_io_clone);
9402 
9403 	if (bio->bi_status && !orig_bio->bi_status)
9404 		orig_bio->bi_status = bio->bi_status;
9405 
9406 	if (md_io_clone->start_time)
9407 		bio_end_io_acct(orig_bio, md_io_clone->start_time);
9408 
9409 	bio_put(bio);
9410 	percpu_ref_put(&mddev->active_io);
9411 }
9412 EXPORT_SYMBOL_GPL(md_free_cloned_bio);
9413 
9414 /* md_allow_write(mddev)
9415  * Calling this ensures that the array is marked 'active' so that writes
9416  * may proceed without blocking.  It is important to call this before
9417  * attempting a GFP_KERNEL allocation while holding the mddev lock.
9418  * Must be called with mddev_lock held.
9419  */
9420 void md_allow_write(struct mddev *mddev)
9421 {
9422 	if (!mddev->pers)
9423 		return;
9424 	if (!md_is_rdwr(mddev))
9425 		return;
9426 	if (!mddev->pers->sync_request)
9427 		return;
9428 
9429 	spin_lock(&mddev->lock);
9430 	if (mddev->in_sync) {
9431 		mddev->in_sync = 0;
9432 		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
9433 		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
9434 		if (mddev->safemode_delay &&
9435 		    mddev->safemode == 0)
9436 			mddev->safemode = 1;
9437 		spin_unlock(&mddev->lock);
9438 		md_update_sb(mddev, 0);
9439 		sysfs_notify_dirent_safe(mddev->sysfs_state);
9440 		/* wait for the dirty state to be recorded in the metadata */
9441 		wait_event(mddev->sb_wait,
9442 			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
9443 	} else
9444 		spin_unlock(&mddev->lock);
9445 }
9446 EXPORT_SYMBOL_GPL(md_allow_write);
9447 
9448 static sector_t md_sync_max_sectors(struct mddev *mddev,
9449 				    enum sync_action action)
9450 {
9451 	switch (action) {
9452 	case ACTION_RESYNC:
9453 	case ACTION_CHECK:
9454 	case ACTION_REPAIR:
9455 		atomic64_set(&mddev->resync_mismatches, 0);
9456 		fallthrough;
9457 	case ACTION_RESHAPE:
9458 		return mddev->resync_max_sectors;
9459 	case ACTION_RECOVER:
9460 		return mddev->dev_sectors;
9461 	default:
9462 		return 0;
9463 	}
9464 }
9465 
9466 /*
9467  * If lazy recovery is requested and all rdevs are in sync, select the rdev
9468  * with the highest index to perform recovery and build the initial xor data;
9469  * this matches the old bitmap behaviour.
9470  */
9471 static bool mddev_select_lazy_recover_rdev(struct mddev *mddev)
9472 {
9473 	struct md_rdev *recover_rdev = NULL;
9474 	struct md_rdev *rdev;
9475 	bool ret = false;
9476 
9477 	rcu_read_lock();
9478 	rdev_for_each_rcu(rdev, mddev) {
9479 		if (rdev->raid_disk < 0)
9480 			continue;
9481 
9482 		if (test_bit(Faulty, &rdev->flags) ||
9483 		    !test_bit(In_sync, &rdev->flags))
9484 			break;
9485 
9486 		if (!recover_rdev || recover_rdev->raid_disk < rdev->raid_disk)
9487 			recover_rdev = rdev;
9488 	}
9489 
9490 	if (recover_rdev) {
9491 		clear_bit(In_sync, &recover_rdev->flags);
9492 		ret = true;
9493 	}
9494 
9495 	rcu_read_unlock();
9496 	return ret;
9497 }
9498 
9499 static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
9500 {
9501 	sector_t start = 0;
9502 	struct md_rdev *rdev;
9503 
9504 	switch (action) {
9505 	case ACTION_CHECK:
9506 	case ACTION_REPAIR:
9507 		return mddev->resync_min;
9508 	case ACTION_RESYNC:
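		/*
		 * With a bitmap, resync can start from 0: the bitmap records
		 * which regions are out of sync, so in-sync regions are
		 * skipped during the sweep.
		 */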
9509 		if (!mddev->bitmap)
9510 			return mddev->resync_offset;
9511 		return 0;
9512 	case ACTION_RESHAPE:
9513 		/*
9514 		 * If the original node aborts reshaping then we continue the
9515 		 * reshape, so set the position again to avoid restarting the
9516 		 * reshape from the very beginning.
9517 		 */
9518 		if (mddev_is_clustered(mddev) &&
9519 		    mddev->reshape_position != MaxSector)
9520 			return mddev->reshape_position;
9521 		return 0;
9522 	case ACTION_RECOVER:
9523 		start = MaxSector;
9524 		rcu_read_lock();
9525 		rdev_for_each_rcu(rdev, mddev)
9526 			if (rdev_needs_recovery(rdev, start))
9527 				start = rdev->recovery_offset;
9528 		rcu_read_unlock();
9529 
9530 		/*
9531 		 * If there are no spares and raid456 lazy initial recovery is
9532 		 * requested, pick an in-sync rdev so recovery can start from 0.
9533 		 */
9534 		if (test_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery) &&
9535 		    start == MaxSector && mddev_select_lazy_recover_rdev(mddev))
9536 			start = 0;
9537 
9538 		/* If there is a bitmap, we need to make sure all
9539 		 * writes that started before we added a spare
9540 		 * complete before we start doing a recovery.
9541 		 * Otherwise the write might complete and (via
9542 		 * bitmap_endwrite) set a bit in the bitmap after the
9543 		 * recovery has checked that bit and skipped that
9544 		 * region.
9545 		 */
9546 		if (mddev->bitmap) {
9547 			mddev->pers->quiesce(mddev, 1);
9548 			mddev->pers->quiesce(mddev, 0);
9549 		}
9550 		return start;
9551 	default:
9552 		return MaxSector;
9553 	}
9554 }
9555 
9556 static bool sync_io_within_limit(struct mddev *mddev)
9557 {
9558 	/*
9559 	 * For raid456, sync IO is one stripe (4k) per IO; for other levels,
9560 	 * it's RESYNC_PAGES (64k) per IO.
9561 	 */
9562 	return atomic_read(&mddev->recovery_active) <
9563 	       (raid_is_456(mddev) ? 8 : 128) * sync_io_depth(mddev);
9564 }
9565 
9566 /*
9567  * Update sync offset and mddev status when sync completes
9568  */
9569 static void md_finish_sync(struct mddev *mddev, enum sync_action action)
9570 {
9571 	struct md_rdev *rdev;
9572 
9573 	switch (action) {
9574 	case ACTION_RESYNC:
9575 	case ACTION_REPAIR:
9576 		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9577 			mddev->curr_resync = MaxSector;
9578 		mddev->resync_offset = mddev->curr_resync;
9579 		break;
9580 	case ACTION_RECOVER:
9581 		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9582 			mddev->curr_resync = MaxSector;
9583 		rcu_read_lock();
9584 		rdev_for_each_rcu(rdev, mddev)
9585 			if (mddev->delta_disks >= 0 &&
9586 			    rdev_needs_recovery(rdev, mddev->curr_resync))
9587 				rdev->recovery_offset = mddev->curr_resync;
9588 		rcu_read_unlock();
9589 		break;
9590 	case ACTION_RESHAPE:
9591 		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9592 		    mddev->delta_disks > 0 &&
9593 		    mddev->pers->finish_reshape &&
9594 		    mddev->pers->size &&
9595 		    !mddev_is_dm(mddev)) {
9596 			mddev_lock_nointr(mddev);
9597 			md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
9598 			mddev_unlock(mddev);
9599 			if (!mddev_is_clustered(mddev))
9600 				set_capacity_and_notify(mddev->gendisk,
9601 							mddev->array_sectors);
9602 		}
9603 		if (mddev->pers->finish_reshape)
9604 			mddev->pers->finish_reshape(mddev);
9605 		break;
9607 	case ACTION_CHECK:
9608 	default:
9609 		break;
9610 	}
9611 }
9612 
9613 #define SYNC_MARKS	10
9614 #define	SYNC_MARK_STEP	(3*HZ)
9615 #define UPDATE_FREQUENCY (5*60*HZ)
9616 void md_do_sync(struct md_thread *thread)
9617 {
9618 	struct mddev *mddev = thread->mddev;
9619 	struct mddev *mddev2;
9620 	unsigned int currspeed = 0, window;
9621 	sector_t max_sectors,j, io_sectors, recovery_done;
9622 	unsigned long mark[SYNC_MARKS];
9623 	unsigned long update_time;
9624 	sector_t mark_cnt[SYNC_MARKS];
9625 	int last_mark,m;
9626 	sector_t last_check;
9627 	int skipped = 0;
9628 	enum sync_action action;
9629 	const char *desc;
9630 	struct blk_plug plug;
9631 	int ret;
9632 
9633 	/* just in case the thread restarts... */
9634 	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
9635 		return;
9636 
9637 	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9638 		goto skip;
9639 
9640 	if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
9641 	    !md_is_rdwr(mddev)) {/* never try to sync a read-only array */
9642 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9643 		goto skip;
9644 	}
9645 
9646 	if (mddev_is_clustered(mddev)) {
9647 		ret = mddev->cluster_ops->resync_start(mddev);
9648 		if (ret)
9649 			goto skip;
9650 
9651 		set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
9652 		if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
9653 			test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
9654 			test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
9655 		     && ((unsigned long long)mddev->curr_resync_completed
9656 			 < (unsigned long long)mddev->resync_max_sectors))
9657 			goto skip;
9658 	}
9659 
9660 	action = md_sync_action(mddev);
9661 	if (action == ACTION_FROZEN || action == ACTION_IDLE) {
9662 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9663 		goto skip;
9664 	}
9665 
9666 	desc = md_sync_action_name(action);
9667 	mddev->last_sync_action = action;
9668 
9669 	/*
9670 	 * Before starting a resync we must have set curr_resync to
9671 	 * MD_RESYNC_DELAYED, and then checked that every "conflicting" array
9672 	 * has curr_resync less than ours.  When we find one that is the same
9673 	 * or higher we wait on resync_wait.  To avoid deadlock, we reduce
9674 	 * curr_resync to MD_RESYNC_YIELDED if we choose to yield (based
9675 	 * arbitrarily on the address of the mddev structure).  This will mean
9676 	 * we have to start checking from the beginning again.
9677 	 */
9678 	if (mddev_is_clustered(mddev))
9679 		mddev->cluster_ops->resync_start_notify(mddev);
9680 	do {
9681 		int mddev2_minor = -1;
9682 		mddev->curr_resync = MD_RESYNC_DELAYED;
9683 
9684 	try_again:
9685 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9686 			goto skip;
9687 		spin_lock(&all_mddevs_lock);
9688 		list_for_each_entry(mddev2, &all_mddevs, all_mddevs) {
9689 			if (test_bit(MD_DELETED, &mddev2->flags))
9690 				continue;
9691 			if (mddev2 == mddev)
9692 				continue;
9693 			if (!mddev->parallel_resync
9694 			&&  mddev2->curr_resync
9695 			&&  match_mddev_units(mddev, mddev2)) {
9696 				DEFINE_WAIT(wq);
9697 				if (mddev < mddev2 &&
9698 				    mddev->curr_resync == MD_RESYNC_DELAYED) {
9699 					/* arbitrarily yield */
9700 					mddev->curr_resync = MD_RESYNC_YIELDED;
9701 					wake_up(&resync_wait);
9702 				}
9703 				if (mddev > mddev2 &&
9704 				    mddev->curr_resync == MD_RESYNC_YIELDED)
9705 					/* no need to wait here, we can wait the next
9706 					 * time 'round when curr_resync == MD_RESYNC_DELAYED
9707 					 */
9708 					continue;
9709 				/* We need to wait 'interruptible' so as not to
9710 				 * contribute to the load average, and not to
9711 				 * be caught by 'softlockup'
9712 				 */
9713 				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
9714 				if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9715 				    mddev2->curr_resync >= mddev->curr_resync) {
9716 					if (mddev2_minor != mddev2->md_minor) {
9717 						mddev2_minor = mddev2->md_minor;
9718 						pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
9719 							desc, mdname(mddev),
9720 							mdname(mddev2));
9721 					}
9722 					spin_unlock(&all_mddevs_lock);
9723 
9724 					if (signal_pending(current))
9725 						flush_signals(current);
9726 					schedule();
9727 					finish_wait(&resync_wait, &wq);
9728 					goto try_again;
9729 				}
9730 				finish_wait(&resync_wait, &wq);
9731 			}
9732 		}
9733 		spin_unlock(&all_mddevs_lock);
9734 	} while (mddev->curr_resync < MD_RESYNC_DELAYED);
9735 
9736 	max_sectors = md_sync_max_sectors(mddev, action);
9737 	j = md_sync_position(mddev, action);
9738 
9739 	pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
9740 	pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
9741 	pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
9742 		 speed_max(mddev), desc);
9743 
9744 	is_mddev_idle(mddev, 1); /* this initializes IO event counters */
9745 
9746 	io_sectors = 0;
9747 	for (m = 0; m < SYNC_MARKS; m++) {
9748 		mark[m] = jiffies;
9749 		mark_cnt[m] = io_sectors;
9750 	}
9751 	last_mark = 0;
9752 	mddev->resync_mark = mark[last_mark];
9753 	mddev->resync_mark_cnt = mark_cnt[last_mark];
9754 
9755 	/*
9756 	 * Tune reconstruction:
9757 	 */
9758 	window = 32 * (PAGE_SIZE / 512);
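	/* with 4K pages this is 256 sectors, i.e. a 128k resync window */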
9759 	pr_debug("md: using %dk window, over a total of %lluk.\n",
9760 		 window/2, (unsigned long long)max_sectors/2);
9761 
9762 	atomic_set(&mddev->recovery_active, 0);
9763 	last_check = 0;
9764 
9765 	if (j >= MD_RESYNC_ACTIVE) {
9766 		pr_debug("md: resuming %s of %s from checkpoint.\n",
9767 			 desc, mdname(mddev));
9768 		mddev->curr_resync = j;
9769 	} else
9770 		mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */
9771 	mddev->curr_resync_completed = j;
9772 	sysfs_notify_dirent_safe(mddev->sysfs_completed);
9773 	md_new_event();
9774 	update_time = jiffies;
9775 
9776 	blk_start_plug(&plug);
9777 	while (j < max_sectors) {
9778 		sector_t sectors;
9779 
9780 		skipped = 0;
9781 
9782 		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9783 		    ((mddev->curr_resync > mddev->curr_resync_completed &&
9784 		      (mddev->curr_resync - mddev->curr_resync_completed)
9785 		      > (max_sectors >> 4)) ||
9786 		     time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
9787 		     (j - mddev->curr_resync_completed)*2
9788 		     >= mddev->resync_max - mddev->curr_resync_completed ||
9789 		     mddev->curr_resync_completed > mddev->resync_max
9790 			    )) {
9791 			/* time to update curr_resync_completed */
9792 			wait_event(mddev->recovery_wait,
9793 				   atomic_read(&mddev->recovery_active) == 0);
9794 			mddev->curr_resync_completed = j;
9795 			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
9796 			    j > mddev->resync_offset)
9797 				mddev->resync_offset = j;
9798 			update_time = jiffies;
9799 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
9800 			sysfs_notify_dirent_safe(mddev->sysfs_completed);
9801 		}
9802 
9803 		while (j >= mddev->resync_max &&
9804 		       !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9805 			/* As this condition is controlled by user-space,
9806 			 * we can block indefinitely, so use '_interruptible'
9807 			 * to avoid triggering warnings.
9808 			 */
9809 			flush_signals(current); /* just in case */
9810 			wait_event_interruptible(mddev->recovery_wait,
9811 						 mddev->resync_max > j
9812 						 || test_bit(MD_RECOVERY_INTR,
9813 							     &mddev->recovery));
9814 		}
9815 
9816 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9817 			break;
9818 
9819 		if (mddev->bitmap_ops && mddev->bitmap_ops->skip_sync_blocks) {
9820 			sectors = mddev->bitmap_ops->skip_sync_blocks(mddev, j);
9821 			if (sectors)
9822 				goto update;
9823 		}
9824 
9825 		sectors = mddev->pers->sync_request(mddev, j, max_sectors,
9826 						    &skipped);
9827 		if (sectors == 0) {
9828 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9829 			break;
9830 		}
9831 
9832 		if (!skipped) { /* actual IO requested */
9833 			io_sectors += sectors;
9834 			atomic_add(sectors, &mddev->recovery_active);
9835 		}
9836 
9837 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9838 			break;
9839 
9840 update:
9841 		j += sectors;
9842 		if (j > max_sectors)
9843 			/* when skipping, extra large numbers can be returned. */
9844 			j = max_sectors;
9845 		if (j >= MD_RESYNC_ACTIVE)
9846 			mddev->curr_resync = j;
9847 		mddev->curr_mark_cnt = io_sectors;
9848 		if (last_check == 0)
9849 			/* this is the earliest that rebuild will be
9850 			 * visible in /proc/mdstat
9851 			 */
9852 			md_new_event();
9853 
9854 		if (last_check + window > io_sectors || j == max_sectors)
9855 			continue;
9856 
9857 		last_check = io_sectors;
9858 	repeat:
9859 		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
9860 			/* step marks */
9861 			int next = (last_mark+1) % SYNC_MARKS;
9862 
9863 			mddev->resync_mark = mark[next];
9864 			mddev->resync_mark_cnt = mark_cnt[next];
9865 			mark[next] = jiffies;
9866 			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
9867 			last_mark = next;
9868 		}
9869 
9870 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9871 			break;
9872 
9873 		/*
9874 		 * this loop exits only when we are slower than the 'hard'
9875 		 * speed limit, or the system was IO-idle for
9876 		 * a jiffy.
9877 		 * the system might be non-idle CPU-wise, but we only care
9878 		 * about not overloading the IO subsystem. (things like an
9879 		 * e2fsck being done on the RAID array should execute fast)
9880 		 */
9881 		cond_resched();
9882 
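		/*
		 * currspeed is in KiB/sec: sectors completed since the last
		 * mark, halved to KiB, over the elapsed seconds; the +1 terms
		 * guard against division by zero and a zero result.
		 */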
9883 		recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
9884 		currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
9885 			/((jiffies-mddev->resync_mark)/HZ +1) +1;
9886 
9887 		if (currspeed > speed_min(mddev)) {
9888 			if (currspeed > speed_max(mddev)) {
9889 				msleep(500);
9890 				goto repeat;
9891 			}
9892 			if (!sync_io_within_limit(mddev) &&
9893 			    !is_mddev_idle(mddev, 0)) {
9894 				/*
9895 				 * Give other IO more of a chance.
9896 				 * The faster the devices, the less we wait.
9897 				 */
9898 				wait_event(mddev->recovery_wait,
9899 					   !atomic_read(&mddev->recovery_active));
9900 			}
9901 		}
9902 	}
9903 	pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
9904 		test_bit(MD_RECOVERY_INTR, &mddev->recovery)
9905 		? "interrupted" : "done");
9906 	/*
9907 	 * this also signals 'finished resyncing' to md_stop
9908 	 */
9909 	blk_finish_plug(&plug);
9910 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
9911 
9912 	if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9913 	    mddev->curr_resync >= MD_RESYNC_ACTIVE) {
9914 		/* All sync IO completes after recovery_active becomes 0 */
9915 		mddev->curr_resync_completed = mddev->curr_resync;
9916 		sysfs_notify_dirent_safe(mddev->sysfs_completed);
9917 	}
9918 	mddev->pers->sync_request(mddev, max_sectors, max_sectors, &skipped);
9919 
9920 	if (mddev->curr_resync > MD_RESYNC_ACTIVE)
9921 		md_finish_sync(mddev, action);
9922  skip:
9923 	/* set CHANGE_PENDING here since maybe another update is needed,
9924 	 * so other nodes are informed. It should be harmless for normal
9925 	 * raid */
9926 	set_mask_bits(&mddev->sb_flags, 0,
9927 		      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
9928 	spin_lock(&mddev->lock);
9929 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9930 		/* We completed so min/max setting can be forgotten if used. */
9931 		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9932 			mddev->resync_min = 0;
9933 		mddev->resync_max = MaxSector;
9934 	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9935 		mddev->resync_min = mddev->curr_resync_completed;
9936 	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
9937 	mddev->curr_resync = MD_RESYNC_NONE;
9938 	spin_unlock(&mddev->lock);
9939 
9940 	wake_up(&resync_wait);
9941 	md_wakeup_thread(mddev->thread);
9942 	return;
9943 }
9944 EXPORT_SYMBOL_GPL(md_do_sync);
9945 
9946 static bool rdev_removeable(struct md_rdev *rdev)
9947 {
9948 	/* rdev is not used. */
9949 	if (rdev->raid_disk < 0)
9950 		return false;
9951 
9952 	/* There is still inflight IO, don't remove this rdev. */
9953 	if (atomic_read(&rdev->nr_pending))
9954 		return false;
9955 
9956 	/*
9957 	 * An error occurred but has not yet been acknowledged by the metadata
9958 	 * handler, don't remove this rdev.
9959 	 */
9960 	if (test_bit(Blocked, &rdev->flags))
9961 		return false;
9962 
9963 	/* Faulty rdev is not used, it's safe to remove it. */
9964 	if (test_bit(Faulty, &rdev->flags))
9965 		return true;
9966 
9967 	/* Journal disk can only be removed if it's faulty. */
9968 	if (test_bit(Journal, &rdev->flags))
9969 		return false;
9970 
9971 	/*
9972 	 * 'In_sync' is cleared while 'raid_disk' is valid, which means
9973 	 * replacement has just become active from pers->spare_active(), and
9974 	 * then pers->hot_remove_disk() will replace this rdev with replacement.
9975 	 */
9976 	if (!test_bit(In_sync, &rdev->flags))
9977 		return true;
9978 
9979 	return false;
9980 }
9981 
9982 static bool rdev_is_spare(struct md_rdev *rdev)
9983 {
9984 	return !test_bit(Candidate, &rdev->flags) && rdev->raid_disk >= 0 &&
9985 	       !test_bit(In_sync, &rdev->flags) &&
9986 	       !test_bit(Journal, &rdev->flags) &&
9987 	       !test_bit(Faulty, &rdev->flags);
9988 }
9989 
9990 static bool rdev_addable(struct md_rdev *rdev)
9991 {
9992 	struct mddev *mddev;
9993 
9994 	mddev = READ_ONCE(rdev->mddev);
9995 	if (!mddev)
9996 		return false;
9997 
9998 	/* rdev is already used, don't add it again. */
9999 	if (test_bit(Candidate, &rdev->flags) || rdev->raid_disk >= 0 ||
10000 	    test_bit(Faulty, &rdev->flags))
10001 		return false;
10002 
10003 	/* Allow to add journal disk. */
10004 	if (test_bit(Journal, &rdev->flags))
10005 		return true;
10006 
10007 	/* Allow to add if array is read-write. */
10008 	if (md_is_rdwr(mddev))
10009 		return true;
10010 
10011 	/*
10012 	 * For a read-only array, only allow re-adding an rdev. And if a bitmap
10013 	 * is used, don't allow re-adding an rdev that is too old.
10014 	 */
10015 	if (rdev->saved_raid_disk >= 0 && !test_bit(Bitmap_sync, &rdev->flags))
10016 		return true;
10017 
10018 	return false;
10019 }
10020 
10021 static bool md_spares_need_change(struct mddev *mddev)
10022 {
10023 	struct md_rdev *rdev;
10024 
10025 	rcu_read_lock();
10026 	rdev_for_each_rcu(rdev, mddev) {
10027 		if (rdev_removeable(rdev) || rdev_addable(rdev)) {
10028 			rcu_read_unlock();
10029 			return true;
10030 		}
10031 	}
10032 	rcu_read_unlock();
10033 	return false;
10034 }
10035 
10036 static int remove_spares(struct mddev *mddev, struct md_rdev *this)
10037 {
10038 	struct md_rdev *rdev;
10039 	int removed = 0;
10040 
10041 	rdev_for_each(rdev, mddev) {
10042 		if ((this == NULL || rdev == this) && rdev_removeable(rdev) &&
10043 		    !mddev->pers->hot_remove_disk(mddev, rdev)) {
10044 			sysfs_unlink_rdev(mddev, rdev);
10045 			rdev->saved_raid_disk = rdev->raid_disk;
10046 			rdev->raid_disk = -1;
10047 			removed++;
10048 		}
10049 	}
10050 
10051 	if (removed && mddev->kobj.sd)
10052 		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
10053 
10054 	return removed;
10055 }
10056 
10057 static int remove_and_add_spares(struct mddev *mddev,
10058 				 struct md_rdev *this)
10059 {
10060 	struct md_rdev *rdev;
10061 	int spares = 0;
10062 	int removed = 0;
10063 
10064 	if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
10065 		/* Mustn't remove devices when resync thread is running */
10066 		return 0;
10067 
10068 	removed = remove_spares(mddev, this);
10069 	if (this && removed)
10070 		goto no_add;
10071 
10072 	rdev_for_each(rdev, mddev) {
10073 		if (this && this != rdev)
10074 			continue;
10075 		if (rdev_is_spare(rdev))
10076 			spares++;
10077 		if (!rdev_addable(rdev))
10078 			continue;
10079 		if (!test_bit(Journal, &rdev->flags))
10080 			rdev->recovery_offset = 0;
10081 		if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
10082 			/* failure here is OK */
10083 			sysfs_link_rdev(mddev, rdev);
10084 			if (!test_bit(Journal, &rdev->flags))
10085 				spares++;
10086 			md_new_event();
10087 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
10088 		}
10089 	}
10090 no_add:
10091 	if (removed)
10092 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
10093 	return spares;
10094 }
10095 
10096 static bool md_choose_sync_action(struct mddev *mddev, int *spares)
10097 {
10098 	/* Check if reshape is in progress first. */
10099 	if (mddev->reshape_position != MaxSector) {
10100 		if (mddev->pers->check_reshape == NULL ||
10101 		    mddev->pers->check_reshape(mddev) != 0)
10102 			return false;
10103 
10104 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
10105 		clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
10106 		clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
10107 		return true;
10108 	}
10109 
10110 	/* Check if resync is in progress. */
10111 	if (mddev->resync_offset < MaxSector) {
10112 		remove_spares(mddev, NULL);
10113 		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
10114 		clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
10115 		clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
10116 		return true;
10117 	}
10118 
10119 	/*
10120 	 * Remove any failed drives, then add spares if possible. Spares are
10121 	 * also removed and re-added, to allow the personality to fail the
10122 	 * re-add.
10123 	 */
10124 	*spares = remove_and_add_spares(mddev, NULL);
10125 	if (*spares || test_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery)) {
10126 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
10127 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
10128 		clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
10129 
10130 		/* Start new recovery. */
10131 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
10132 		return true;
10133 	}
10134 
10135 	/* Delay to choose resync/check/repair in md_do_sync(). */
10136 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
10137 		return true;
10138 
10139 	/* Nothing to be done */
10140 	return false;
10141 }
10142 
10143 static void md_start_sync(struct work_struct *ws)
10144 {
10145 	struct mddev *mddev = container_of(ws, struct mddev, sync_work);
10146 	int spares = 0;
10147 	bool suspend = false;
10148 	char *name;
10149 
10150 	/*
10151 	 * If reshape is still in progress, spares won't be added or removed
10152 	 * from conf until reshape is done.
10153 	 */
10154 	if (mddev->reshape_position == MaxSector &&
10155 	    md_spares_need_change(mddev)) {
10156 		suspend = true;
10157 		mddev_suspend(mddev, false);
10158 	}
10159 
10160 	mddev_lock_nointr(mddev);
10161 	if (!md_is_rdwr(mddev)) {
10162 		/*
10163 		 * On a read-only array we can:
10164 		 * - remove failed devices
10165 		 * - add already-in_sync devices if the array itself is in-sync.
10166 		 * As we only add devices that are already in-sync, we can
10167 		 * activate the spares immediately.
10168 		 */
10169 		remove_and_add_spares(mddev, NULL);
10170 		goto not_running;
10171 	}
10172 
10173 	if (!md_choose_sync_action(mddev, &spares))
10174 		goto not_running;
10175 
10176 	if (!mddev->pers->sync_request)
10177 		goto not_running;
10178 
10179 	/*
10180 	 * We are adding a device or devices to an array which has the bitmap
10181 	 * stored on all devices. So make sure all bitmap pages get written.
10182 	 */
10183 	if (spares && md_bitmap_enabled(mddev, true))
10184 		mddev->bitmap_ops->write_all(mddev);
10185 
10186 	name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
10187 			"reshape" : "resync";
10188 	rcu_assign_pointer(mddev->sync_thread,
10189 			   md_register_thread(md_do_sync, mddev, name));
10190 	if (!mddev->sync_thread) {
10191 		pr_warn("%s: could not start resync thread...\n",
10192 			mdname(mddev));
10193 		/* leave the spares where they are, it shouldn't hurt */
10194 		goto not_running;
10195 	}
10196 
10197 	mddev_unlock(mddev);
10198 	/*
10199 	 * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should
10200 	 * not set it again. Otherwise, we may cause issue like this one:
10201 	 *     https://bugzilla.kernel.org/show_bug.cgi?id=218200
10202 	 * Therefore, use __mddev_resume(mddev, false).
10203 	 */
10204 	if (suspend)
10205 		__mddev_resume(mddev, false);
10206 	md_wakeup_thread(mddev->sync_thread);
10207 	sysfs_notify_dirent_safe(mddev->sysfs_action);
10208 	md_new_event();
10209 	return;
10210 
10211 not_running:
10212 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
10213 	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
10214 	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
10215 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
10216 	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
10217 	mddev_unlock(mddev);
10218 	/*
10219 	 * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should
10220 	 * not set it again. Otherwise, we may cause issue like this one:
10221 	 *     https://bugzilla.kernel.org/show_bug.cgi?id=218200
10222 	 * Therefore, use __mddev_resume(mddev, false).
10223 	 */
10224 	if (suspend)
10225 		__mddev_resume(mddev, false);
10226 
10227 	wake_up(&resync_wait);
10228 	if (test_and_clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
10229 	    mddev->sysfs_action)
10230 		sysfs_notify_dirent_safe(mddev->sysfs_action);
10231 }
10232 
10233 static void unregister_sync_thread(struct mddev *mddev)
10234 {
10235 	if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
10236 		/* resync/recovery still happening */
10237 		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
10238 		return;
10239 	}
10240 
10241 	if (WARN_ON_ONCE(!mddev->sync_thread))
10242 		return;
10243 
10244 	md_reap_sync_thread(mddev);
10245 }
10246 
10247 static bool md_should_do_recovery(struct mddev *mddev)
10248 {
10249 	/*
10250 	 * As long as one of the following flags is set, recovery needs
10251 	 * to run or to be cleaned up.
10252 	 */
10253 	if (test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
10254 	    test_bit(MD_RECOVERY_DONE, &mddev->recovery))
10255 		return true;
10256 
10257 	/*
10258 	 * If no flags are set and the array is read-only,
10259 	 * there is nothing to do.
10260 	 */
10261 	if (!md_is_rdwr(mddev))
10262 		return false;
10263 
10264 	/*
10265 	 * MD_SB_CHANGE_PENDING indicates that the array is switching from clean to
10266 	 * active, and no action is needed for now.
10267 	 * All other MD_SB_* flags require a superblock update.
10268 	 */
10269 	if (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING))
10270 		return true;
10271 
10272 	/*
10273 	 * If the array is not using external metadata and there has been no data
10274 	 * written for some time, then the array's status needs to be set to
10275 	 * in_sync.
10276 	 */
10277 	if (mddev->external == 0 && mddev->safemode == 1)
10278 		return true;
10279 
10280 	/*
10281 	 * When the system is about to restart or the process receives a signal,
10282 	 * the array needs to be synchronized as soon as possible.
10283 	 * Once the data synchronization is completed, the array status needs
10284 	 * to change to in_sync.
10285 	 */
10286 	if (mddev->safemode == 2 && !mddev->in_sync &&
10287 	    mddev->resync_offset == MaxSector)
10288 		return true;
10289 
10290 	return false;
10291 }
10292 
10293 /*
10294  * This routine is regularly called by all per-raid-array threads to
10295  * deal with generic issues like resync and super-block update.
10296  * Raid personalities that don't have a thread (linear/raid0) do not
10297  * need this as they never do any recovery or update the superblock.
10298  *
10299  * It does not do any resync itself, but rather "forks" off other threads
10300  * to do that as needed.
10301  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
10302  * "->recovery" and create a thread at ->sync_thread.
10303  * When the thread finishes it sets MD_RECOVERY_DONE
10304  * and wakes up this thread which will reap the thread and finish up.
10305  * This thread also removes any faulty devices (with nr_pending == 0).
10306  *
10307  * The overall approach is:
10308  *  1/ if the superblock needs updating, update it.
10309  *  2/ If a recovery thread is running, don't do anything else.
10310  *  3/ If recovery has finished, clean up, possibly marking spares active.
10311  *  4/ If there are any faulty devices, remove them.
10312  *  5/ If array is degraded, try to add spare devices
10313  *  6/ If array has spares or is not in-sync, start a resync thread.
10314  */
10315 void md_check_recovery(struct mddev *mddev)
10316 {
10317 	if (md_bitmap_enabled(mddev, false) && mddev->bitmap_ops->daemon_work)
10318 		mddev->bitmap_ops->daemon_work(mddev);
10319 
10320 	if (signal_pending(current)) {
10321 		if (mddev->pers->sync_request && !mddev->external) {
10322 			pr_debug("md: %s in immediate safe mode\n",
10323 				 mdname(mddev));
10324 			mddev->safemode = 2;
10325 		}
10326 		flush_signals(current);
10327 	}
10328 
10329 	if (!md_should_do_recovery(mddev))
10330 		return;
10331 
10332 	if (mddev_trylock(mddev)) {
10333 		bool try_set_sync = mddev->safemode != 0;
10334 
10335 		if (!mddev->external && mddev->safemode == 1)
10336 			mddev->safemode = 0;
10337 
10338 		if (!md_is_rdwr(mddev)) {
10339 			struct md_rdev *rdev;
10340 
10341 			if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
10342 				unregister_sync_thread(mddev);
10343 				goto unlock;
10344 			}
10345 
10346 			if (!mddev->external && mddev->in_sync)
10347 				/*
10348 				 * 'Blocked' flag not needed as failed devices
10349 				 * will be recorded if array switched to read/write.
10350 				 * Leaving it set will prevent the device
10351 				 * from being removed.
10352 				 */
10353 				rdev_for_each(rdev, mddev)
10354 					clear_bit(Blocked, &rdev->flags);
10355 
10356 			/*
10357 			 * There is no thread, but we need to call
10358 			 * ->spare_active and clear saved_raid_disk
10359 			 */
10360 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
10361 			md_reap_sync_thread(mddev);
10362 
10363 			/*
10364 			 * Let md_start_sync() remove and add rdevs to the
10365 			 * array.
10366 			 */
10367 			if (md_spares_need_change(mddev)) {
10368 				set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
10369 				queue_work(md_misc_wq, &mddev->sync_work);
10370 			}
10371 
10372 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
10373 			clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
10374 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
10375 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
10376 
10377 			goto unlock;
10378 		}
10379 
10380 		if (mddev_is_clustered(mddev)) {
10381 			struct md_rdev *rdev, *tmp;
10382 			/* kick the device if another node issued a
10383 			 * disk removal.
10384 			 */
10385 			rdev_for_each_safe(rdev, tmp, mddev) {
10386 				if (rdev->raid_disk < 0 &&
10387 				    test_and_clear_bit(ClusterRemove, &rdev->flags))
10388 					md_kick_rdev_from_array(rdev);
10389 			}
10390 		}
10391 
10392 		if (try_set_sync && !mddev->external && !mddev->in_sync) {
10393 			spin_lock(&mddev->lock);
10394 			set_in_sync(mddev);
10395 			spin_unlock(&mddev->lock);
10396 		}
10397 
10398 		if (mddev->sb_flags)
10399 			md_update_sb(mddev, 0);
10400 
10401 		/*
10402 		 * Never start a new sync thread if MD_RECOVERY_RUNNING is
10403 		 * still set.
10404 		 */
10405 		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
10406 			unregister_sync_thread(mddev);
10407 			goto unlock;
10408 		}
10409 
10410 		/* Set RUNNING before clearing NEEDED to avoid
10411 		 * any transients in the value of "sync_action".
10412 		 */
10413 		mddev->curr_resync_completed = 0;
10414 		spin_lock(&mddev->lock);
10415 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
10416 		spin_unlock(&mddev->lock);
10417 		/* Clear some bits that don't mean anything, but
10418 		 * might be left set
10419 		 */
10420 		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
10421 		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
10422 
10423 		if (test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) &&
10424 		    !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
10425 			queue_work(md_misc_wq, &mddev->sync_work);
10426 		} else {
10427 			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
10428 			wake_up(&resync_wait);
10429 		}
10430 
10431 	unlock:
10432 		wake_up(&mddev->sb_wait);
10433 		mddev_unlock(mddev);
10434 	}
10435 }
10436 EXPORT_SYMBOL(md_check_recovery);
10437 
10438 void md_reap_sync_thread(struct mddev *mddev)
10439 {
10440 	struct md_rdev *rdev;
10441 	sector_t old_dev_sectors = mddev->dev_sectors;
10442 	bool is_reshaped = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
10443 
10444 	/* resync has finished, collect result */
10445 	md_unregister_thread(mddev, &mddev->sync_thread);
10446 	atomic_inc(&mddev->sync_seq);
10447 
10448 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
10449 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
10450 	    mddev->degraded != mddev->raid_disks) {
10451 		/* success...*/
10452 		/* activate any spares */
10453 		if (mddev->pers->spare_active(mddev)) {
10454 			sysfs_notify_dirent_safe(mddev->sysfs_degraded);
10455 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
10456 		}
10457 	}
10458 
10459 	/* If the array is no longer degraded, then any saved_raid_disk
10460 	 * information must be scrapped.
10461 	 */
10462 	if (!mddev->degraded)
10463 		rdev_for_each(rdev, mddev)
10464 			rdev->saved_raid_disk = -1;
10465 
10466 	md_update_sb(mddev, 1);
10467 	/* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
10468 	 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
10469 	 * clustered raid */
10470 	if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
10471 		mddev->cluster_ops->resync_finish(mddev);
10472 	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
10473 	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
10474 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
10475 	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
10476 	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
10477 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
10478 	clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
10479 	/*
10480 	 * We call mddev->cluster_ops->update_size here because sync_size could
10481 	 * have been changed by md_update_sb, and MD_RECOVERY_RESHAPE is now
10482 	 * cleared, so it is time to update the size across the cluster.
10483 	 */
10484 	if (mddev_is_clustered(mddev) && is_reshaped &&
10485 	    mddev->pers->finish_reshape &&
10486 	    !test_bit(MD_CLOSING, &mddev->flags))
10487 		mddev->cluster_ops->update_size(mddev, old_dev_sectors);
10488 	/* Flag recovery as needed just to double-check */
10489 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
10490 	sysfs_notify_dirent_safe(mddev->sysfs_completed);
10491 	sysfs_notify_dirent_safe(mddev->sysfs_action);
10492 	md_new_event();
10493 	if (mddev->event_work.func)
10494 		queue_work(md_misc_wq, &mddev->event_work);
10495 	wake_up(&resync_wait);
10496 }
10497 EXPORT_SYMBOL(md_reap_sync_thread);
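
/*
 * Illustrative sketch (an assumption, loosely modeled on raid1): the
 * ->spare_active() hook invoked above walks the member devices, promotes
 * freshly recovered ones to In_sync, adjusts ->degraded, and returns how
 * many devices it activated so the caller knows whether to notify sysfs
 * and schedule a superblock update:
 *
 *	static int example_spare_active(struct mddev *mddev)
 *	{
 *		struct md_rdev *rdev;
 *		int count = 0;
 *
 *		rdev_for_each(rdev, mddev)
 *			if (rdev->raid_disk >= 0 &&
 *			    !test_bit(Faulty, &rdev->flags) &&
 *			    !test_and_set_bit(In_sync, &rdev->flags))
 *				count++;
 *
 *		spin_lock(&mddev->lock);
 *		mddev->degraded -= count;
 *		spin_unlock(&mddev->lock);
 *		return count;
 *	}
 */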
10498 
10499 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
10500 {
10501 	sysfs_notify_dirent_safe(rdev->sysfs_state);
10502 	wait_event_timeout(rdev->blocked_wait, !rdev_blocked(rdev),
10503 			   msecs_to_jiffies(5000));
10504 	rdev_dec_pending(rdev, mddev);
10505 }
10506 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
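
/*
 * Typical caller pattern (a sketch of how the raid1/raid5 write paths
 * use this): on finding a blocked rdev while assembling a request, take
 * a pending reference, drop the personality's locks, and park here until
 * the rdev is unblocked or the 5s timeout expires.  Note that the pending
 * reference is dropped by rdev_dec_pending() above before returning:
 *
 *	if (rdev_blocked(rdev)) {
 *		atomic_inc(&rdev->nr_pending);
 *		... release personality locks ...
 *		md_wait_for_blocked_rdev(rdev, mddev);
 *		... reacquire locks and retry the request ...
 *	}
 */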
10507 
10508 void md_finish_reshape(struct mddev *mddev)
10509 {
10510 	/* Called by the personality module when a reshape completes. */
10511 	struct md_rdev *rdev;
10512 
10513 	rdev_for_each(rdev, mddev) {
10514 		if (rdev->data_offset > rdev->new_data_offset)
10515 			rdev->sectors += rdev->data_offset - rdev->new_data_offset;
10516 		else
10517 			rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
10518 		rdev->data_offset = rdev->new_data_offset;
10519 	}
10520 }
10521 EXPORT_SYMBOL(md_finish_reshape);
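
/*
 * Worked example (illustrative numbers only): if a reshape moved the data
 * area towards the start of the device, say data_offset = 2048 and
 * new_data_offset = 1024, the usable range grows by 2048 - 1024 = 1024
 * sectors, so rdev->sectors is increased by 1024 before data_offset is
 * rewritten.  A move in the other direction shrinks rdev->sectors by the
 * same amount.
 */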
10522 
10523 /* Bad block management */
10524 
10525 /* Returns true on success, false on failure */
10526 bool rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
10527 			int is_new)
10528 {
10529 	struct mddev *mddev = rdev->mddev;
10530 
10531 	/*
10532 	 * Recording new badblocks for a faulty rdev forces an unnecessary
10533 	 * superblock update. This is fragile for externally managed arrays
10534 	 * because a userspace daemon may be trying to remove the device at the
10535 	 * same time, and a deadlock may occur. This will probably be solved in
10536 	 * mdadm, but it is safer to avoid it here.
10537 	 */
10538 	if (test_bit(Faulty, &rdev->flags))
10539 		return true;
10540 
10541 	if (is_new)
10542 		s += rdev->new_data_offset;
10543 	else
10544 		s += rdev->data_offset;
10545 
10546 	if (!badblocks_set(&rdev->badblocks, s, sectors, 0)) {
10547 		/*
10548 		 * Mark the disk as Faulty when setting badblocks fails,
10549 		 * otherwise, bad sectors may be read.
10550 		 */
10551 		md_error(mddev, rdev);
10552 		return false;
10553 	}
10554 
10555 	/* Make sure they get written out promptly */
10556 	if (test_bit(ExternalBbl, &rdev->flags))
10557 		sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
10558 	sysfs_notify_dirent_safe(rdev->sysfs_state);
10559 	set_mask_bits(&mddev->sb_flags, 0,
10560 		      BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
10561 	md_wakeup_thread(rdev->mddev->thread);
10562 	return true;
10563 }
10564 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
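
/*
 * Usage sketch (an assumption, following how the RAID personalities react
 * to a failed member write): record the bad range on that device; on
 * failure the device has already been failed via md_error() above, so the
 * caller only needs to error out the affected request.  's' is relative
 * to the array data; the data offset is added internally:
 *
 *	if (!rdev_set_badblocks(rdev, sector, nr_sectors, 0))
 *		ok = false;	// rdev is now Faulty; fail this range
 */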
10565 
10566 void rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
10567 			  int is_new)
10568 {
10569 	if (is_new)
10570 		s += rdev->new_data_offset;
10571 	else
10572 		s += rdev->data_offset;
10573 
10574 	if (!badblocks_clear(&rdev->badblocks, s, sectors))
10575 		return;
10576 
10577 	if (test_bit(ExternalBbl, &rdev->flags))
10578 		sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
10579 }
10580 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
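
/*
 * Counterpart sketch: after a bad range has been successfully rewritten
 * (e.g. during resync), a personality clears the entry so the device may
 * serve reads from it again:
 *
 *	rdev_clear_badblocks(rdev, sector, nr_sectors, 0);
 *
 * The void return means callers never observe a failed clear; the only
 * side effect beyond the table update is the sysfs notification for
 * externally managed bad-block lists.
 */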
10581 
10582 static int md_notify_reboot(struct notifier_block *this,
10583 			    unsigned long code, void *x)
10584 {
10585 	struct mddev *mddev;
10586 
10587 	spin_lock(&all_mddevs_lock);
10588 	list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
10589 		if (!mddev_get(mddev))
10590 			continue;
10591 		spin_unlock(&all_mddevs_lock);
10592 		if (mddev_trylock(mddev)) {
10593 			if (mddev->pers)
10594 				__md_stop_writes(mddev);
10595 			if (mddev->persistent)
10596 				mddev->safemode = 2;
10597 			mddev_unlock(mddev);
10598 		}
10599 		spin_lock(&all_mddevs_lock);
10600 		mddev_put_locked(mddev);
10601 	}
10602 	spin_unlock(&all_mddevs_lock);
10603 
10604 	return NOTIFY_DONE;
10605 }
10606 
10607 static struct notifier_block md_notifier = {
10608 	.notifier_call	= md_notify_reboot,
10609 	.next		= NULL,
10610 	.priority	= INT_MAX, /* before any real devices */
10611 };
10612 
10613 static void md_geninit(void)
10614 {
10615 	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
10616 
10617 	proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
10618 }
10619 
10620 static int __init md_init(void)
10621 {
10622 	int ret = md_bitmap_init();
10623 
10624 	if (ret)
10625 		return ret;
10626 
10627 	ret = md_llbitmap_init();
10628 	if (ret)
10629 		goto err_bitmap;
10630 
10631 	ret = -ENOMEM;
10632 	md_misc_wq = alloc_workqueue("md_misc", WQ_PERCPU, 0);
10633 	if (!md_misc_wq)
10634 		goto err_misc_wq;
10635 
10636 	ret = __register_blkdev(MD_MAJOR, "md", md_probe);
10637 	if (ret < 0)
10638 		goto err_md;
10639 
10640 	ret = __register_blkdev(0, "mdp", md_probe);
10641 	if (ret < 0)
10642 		goto err_mdp;
10643 	mdp_major = ret;
10644 
10645 	register_reboot_notifier(&md_notifier);
10646 	raid_table_header = register_sysctl("dev/raid", raid_table);
10647 
10648 	md_geninit();
10649 	return 0;
10650 
10651 err_mdp:
10652 	unregister_blkdev(MD_MAJOR, "md");
10653 err_md:
10654 	destroy_workqueue(md_misc_wq);
10655 err_misc_wq:
10656 	md_llbitmap_exit();
10657 err_bitmap:
10658 	md_bitmap_exit();
10659 	return ret;
10660 }
10661 
10662 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
10663 {
10664 	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
10665 	struct md_rdev *rdev2, *tmp;
10666 	int role, ret;
10667 
10668 	/*
10669 	 * If the size was changed on another node, then we need to
10670 	 * resize here as well.
10671 	 */
10672 	if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
10673 		ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
10674 		if (ret)
10675 			pr_info("md-cluster: resize failed\n");
10676 		else if (md_bitmap_enabled(mddev, false))
10677 			mddev->bitmap_ops->update_sb(mddev->bitmap);
10678 	}
10679 
10680 	/* Check for change of roles in the active devices */
10681 	rdev_for_each_safe(rdev2, tmp, mddev) {
10682 		if (test_bit(Faulty, &rdev2->flags)) {
10683 			if (test_bit(ClusterRemove, &rdev2->flags))
10684 				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
10685 			continue;
10686 		}
10687 
10688 		/* Check if the roles changed */
10689 		role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
10690 
10691 		if (test_bit(Candidate, &rdev2->flags)) {
10692 			if (role == MD_DISK_ROLE_FAULTY) {
10693 				pr_info("md: Removing Candidate device %pg because add failed\n",
10694 					rdev2->bdev);
10695 				md_kick_rdev_from_array(rdev2);
10696 				continue;
10697 			}
10698 			else
10699 				clear_bit(Candidate, &rdev2->flags);
10700 		}
10701 
10702 		if (role != rdev2->raid_disk) {
10703 			/*
10704 			 * The device got activated remotely, unless a reshape is happening.
10705 			 */
10706 			if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE &&
10707 			    !(le32_to_cpu(sb->feature_map) &
10708 			      MD_FEATURE_RESHAPE_ACTIVE) &&
10709 			    !mddev->cluster_ops->resync_status_get(mddev)) {
10710 				/*
10711 				 * -1 makes raid1_add_disk() set conf->fullsync
10712 				 * to 1, which avoids skipping the sync when the
10713 				 * remote node goes down during a resync.
10714 				 */
10715 				if ((le32_to_cpu(sb->feature_map)
10716 				    & MD_FEATURE_RECOVERY_OFFSET))
10717 					rdev2->saved_raid_disk = -1;
10718 				else
10719 					rdev2->saved_raid_disk = role;
10720 				ret = remove_and_add_spares(mddev, rdev2);
10721 				pr_info("Activated spare: %pg\n",
10722 					rdev2->bdev);
10723 				/* Wake up mddev->thread here so the array can
10724 				 * resync with the newly activated disk. */
10725 				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
10726 				md_wakeup_thread(mddev->thread);
10727 			}
10728 			/* Device is faulty.
10729 			 * We just want to do the minimum to mark the disk
10730 			 * as faulty. The recovery is performed by the
10731 			 * node that initiated the error.
10732 			 */
10733 			if (role == MD_DISK_ROLE_FAULTY ||
10734 			    role == MD_DISK_ROLE_JOURNAL) {
10735 				md_error(mddev, rdev2);
10736 				clear_bit(Blocked, &rdev2->flags);
10737 			}
10738 		}
10739 	}
10740 
10741 	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
10742 		ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
10743 		if (ret)
10744 			pr_warn("md: updating array disks failed. %d\n", ret);
10745 	}
10746 
10747 	/*
10748 	 * Since mddev->delta_disks has already been updated in
10749 	 * update_raid_disks, it is time to check for a reshape.
10750 	 */
10751 	if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
10752 	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
10753 		/*
10754 		 * A reshape is happening on the remote node, so we need to
10755 		 * update reshape_position and call start_reshape.
10756 		 */
10757 		mddev->reshape_position = le64_to_cpu(sb->reshape_position);
10758 		if (mddev->pers->update_reshape_pos)
10759 			mddev->pers->update_reshape_pos(mddev);
10760 		if (mddev->pers->start_reshape)
10761 			mddev->pers->start_reshape(mddev);
10762 	} else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
10763 		   mddev->reshape_position != MaxSector &&
10764 		   !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
10765 		/* The reshape has just finished on another node. */
10766 		mddev->reshape_position = MaxSector;
10767 		if (mddev->pers->update_reshape_pos)
10768 			mddev->pers->update_reshape_pos(mddev);
10769 	}
10770 
10771 	/* Finally, bring the events count up to date */
10772 	mddev->events = le64_to_cpu(sb->events);
10773 }
10774 
10775 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
10776 {
10777 	int err;
10778 	struct page *swapout = rdev->sb_page;
10779 	struct mdp_superblock_1 *sb;
10780 
10781 	/* Stash the rdev's sb page in the swapout temporary variable
10782 	 * so it can be restored if reloading fails below.
10783 	 */
10784 	rdev->sb_page = NULL;
10785 	err = alloc_disk_sb(rdev);
10786 	if (err == 0) {
10787 		ClearPageUptodate(rdev->sb_page);
10788 		rdev->sb_loaded = 0;
10789 		err = super_types[mddev->major_version].
10790 			load_super(rdev, NULL, mddev->minor_version);
10791 	}
10792 	if (err < 0) {
10793 		pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
10794 				__func__, __LINE__, rdev->desc_nr, err);
10795 		if (rdev->sb_page)
10796 			put_page(rdev->sb_page);
10797 		rdev->sb_page = swapout;
10798 		rdev->sb_loaded = 1;
10799 		return err;
10800 	}
10801 
10802 	sb = page_address(rdev->sb_page);
10803 	/* Pick up the recovery offset only when MD_FEATURE_RECOVERY_OFFSET
10804 	 * is set in the feature map.
10805 	 */
10806 
10807 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
10808 		rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
10809 
10810 	/* The other node finished recovery; call spare_active to mark the
10811 	 * device In_sync and update mddev->degraded.
10812 	 */
10813 	if (rdev->recovery_offset == MaxSector &&
10814 	    !test_bit(In_sync, &rdev->flags) &&
10815 	    mddev->pers->spare_active(mddev))
10816 		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
10817 
10818 	put_page(swapout);
10819 	return 0;
10820 }
10821 
10822 void md_reload_sb(struct mddev *mddev, int nr)
10823 {
10824 	struct md_rdev *rdev = NULL, *iter;
10825 	int err;
10826 
10827 	/* Find the rdev */
10828 	rdev_for_each_rcu(iter, mddev) {
10829 		if (iter->desc_nr == nr) {
10830 			rdev = iter;
10831 			break;
10832 		}
10833 	}
10834 
10835 	if (!rdev) {
10836 		pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
10837 		return;
10838 	}
10839 
10840 	err = read_rdev(mddev, rdev);
10841 	if (err < 0)
10842 		return;
10843 
10844 	check_sb_changes(mddev, rdev);
10845 
10846 	/* Read all rdevs to update recovery_offset */
10847 	rdev_for_each_rcu(rdev, mddev) {
10848 		if (!test_bit(Faulty, &rdev->flags))
10849 			read_rdev(mddev, rdev);
10850 	}
10851 }
10852 EXPORT_SYMBOL(md_reload_sb);
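
/*
 * Caller context (sketch): the md-cluster module is the expected user of
 * md_reload_sb().  When a METADATA_UPDATED message arrives from another
 * node, it passes the device number (raid_slot) carried in the message,
 * so this node re-reads that rdev's superblock and folds the remote
 * changes in via check_sb_changes() above.
 */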
10853 
10854 #ifndef MODULE
10855 
10856 /*
10857  * Searches all registered partitions for autorun RAID arrays
10858  * at boot time.
10859  */
10860 
10861 static DEFINE_MUTEX(detected_devices_mutex);
10862 static LIST_HEAD(all_detected_devices);
10863 struct detected_devices_node {
10864 	struct list_head list;
10865 	dev_t dev;
10866 };
10867 
10868 void md_autodetect_dev(dev_t dev)
10869 {
10870 	struct detected_devices_node *node_detected_dev;
10871 
10872 	node_detected_dev = kzalloc_obj(*node_detected_dev);
10873 	if (node_detected_dev) {
10874 		node_detected_dev->dev = dev;
10875 		mutex_lock(&detected_devices_mutex);
10876 		list_add_tail(&node_detected_dev->list, &all_detected_devices);
10877 		mutex_unlock(&detected_devices_mutex);
10878 	}
10879 }
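
/*
 * Call-site note (sketch): the boot-time partition scanner is the
 * expected caller here, queueing each partition marked as type 0xfd
 * (LINUX_RAID_PARTITION); md_autostart_arrays() below then drains the
 * list and imports every queued device once md initializes.
 */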
10880 
10881 void md_autostart_arrays(int part)
10882 {
10883 	struct md_rdev *rdev;
10884 	struct detected_devices_node *node_detected_dev;
10885 	dev_t dev;
10886 	int i_scanned, i_passed;
10887 
10888 	i_scanned = 0;
10889 	i_passed = 0;
10890 
10891 	pr_info("md: Autodetecting RAID arrays.\n");
10892 
10893 	mutex_lock(&detected_devices_mutex);
10894 	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
10895 		i_scanned++;
10896 		node_detected_dev = list_entry(all_detected_devices.next,
10897 					struct detected_devices_node, list);
10898 		list_del(&node_detected_dev->list);
10899 		dev = node_detected_dev->dev;
10900 		kfree(node_detected_dev);
10901 		mutex_unlock(&detected_devices_mutex);
10902 		rdev = md_import_device(dev, 0, 90);
10903 		mutex_lock(&detected_devices_mutex);
10904 		if (IS_ERR(rdev))
10905 			continue;
10906 
10907 		if (test_bit(Faulty, &rdev->flags))
10908 			continue;
10909 
10910 		set_bit(AutoDetected, &rdev->flags);
10911 		list_add(&rdev->same_set, &pending_raid_disks);
10912 		i_passed++;
10913 	}
10914 	mutex_unlock(&detected_devices_mutex);
10915 
10916 	pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
10917 
10918 	autorun_devices(part);
10919 }
10920 
10921 #endif /* !MODULE */
10922 
10923 static __exit void md_exit(void)
10924 {
10925 	struct mddev *mddev;
10926 	int delay = 1;
10927 
10928 	unregister_blkdev(MD_MAJOR, "md");
10929 	unregister_blkdev(mdp_major, "mdp");
10930 	unregister_reboot_notifier(&md_notifier);
10931 	unregister_sysctl_table(raid_table_header);
10932 
10933 	/* We cannot unload the modules while some process is
10934 	 * waiting for us in select() or poll() - wake them up
10935 	 */
10936 	md_unloading = 1;
10937 	while (waitqueue_active(&md_event_waiters)) {
10938 		/* not safe to leave yet */
10939 		wake_up(&md_event_waiters);
10940 		msleep(delay);
10941 		delay += delay;
10942 	}
10943 	remove_proc_entry("mdstat", NULL);
10944 
10945 	spin_lock(&all_mddevs_lock);
10946 	list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
10947 		if (!mddev_get(mddev))
10948 			continue;
10949 		spin_unlock(&all_mddevs_lock);
10950 		export_array(mddev);
10951 		mddev->ctime = 0;
10952 		mddev->hold_active = 0;
10953 		/*
10954 		 * As the mddev is now fully clear, mddev_put will schedule
10955 		 * the mddev for destruction by a workqueue, and the
10956 		 * destroy_workqueue() below will wait for that to complete.
10957 		 */
10958 		spin_lock(&all_mddevs_lock);
10959 		mddev_put_locked(mddev);
10960 	}
10961 	spin_unlock(&all_mddevs_lock);
10962 
10963 	destroy_workqueue(md_misc_wq);
10964 	md_bitmap_exit();
10965 }
10966 
10967 subsys_initcall(md_init);
10968 module_exit(md_exit)
10969 
10970 static int get_ro(char *buffer, const struct kernel_param *kp)
10971 {
10972 	return sprintf(buffer, "%d\n", start_readonly);
10973 }
10974 static int set_ro(const char *val, const struct kernel_param *kp)
10975 {
10976 	return kstrtouint(val, 10, (unsigned int *)&start_readonly);
10977 }
10978 
10979 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
10980 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
10981 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
10982 module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
10983 module_param(legacy_async_del_gendisk, bool, 0600);
10984 module_param(check_new_feature, bool, 0600);
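
/*
 * Runtime example (assuming the driver is built as the md_mod module):
 * the writable parameters above also surface under
 * /sys/module/md_mod/parameters/, e.g.
 *
 *	echo 1 > /sys/module/md_mod/parameters/start_ro
 *
 * makes newly assembled arrays start read-only until the first write,
 * via the set_ro()/get_ro() handlers above.
 */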
10985 
10986 MODULE_LICENSE("GPL");
10987 MODULE_DESCRIPTION("MD RAID framework");
10988 MODULE_ALIAS("md");
10989 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
10990