GNU Linux-libre 5.10.217-gnu1
drivers/md/md.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3    md.c : Multiple Devices driver for Linux
4      Copyright (C) 1998, 1999, 2000 Ingo Molnar
5
6      completely rewritten, based on the MD driver code from Marc Zyngier
7
8    Changes:
9
10    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
11    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
12    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
13    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
14    - kmod support by: Cyrus Durgin
15    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
16    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17
18    - lots of fixes and improvements to the RAID1/RAID5 and generic
19      RAID code (such as request based resynchronization):
20
21      Neil Brown <neilb@cse.unsw.edu.au>.
22
23    - persistent bitmap code
24      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
25
26
27    Errors, Warnings, etc.
28    Please use:
29      pr_crit() for error conditions that risk data loss
30      pr_err() for error conditions that are unexpected, like an IO error
31          or internal inconsistency
32    pr_warn() for error conditions that could have been predicted, like
33          adding a device to an array when it has incompatible metadata
34    pr_info() for interesting, very rare events, like an array starting
35          or stopping, or resync starting or stopping
36      pr_debug() for everything else.
37
38 */
39
40 #include <linux/sched/mm.h>
41 #include <linux/sched/signal.h>
42 #include <linux/kthread.h>
43 #include <linux/blkdev.h>
44 #include <linux/badblocks.h>
45 #include <linux/sysctl.h>
46 #include <linux/seq_file.h>
47 #include <linux/fs.h>
48 #include <linux/poll.h>
49 #include <linux/ctype.h>
50 #include <linux/string.h>
51 #include <linux/hdreg.h>
52 #include <linux/proc_fs.h>
53 #include <linux/random.h>
54 #include <linux/module.h>
55 #include <linux/reboot.h>
56 #include <linux/file.h>
57 #include <linux/compat.h>
58 #include <linux/delay.h>
59 #include <linux/raid/md_p.h>
60 #include <linux/raid/md_u.h>
61 #include <linux/raid/detect.h>
62 #include <linux/slab.h>
63 #include <linux/percpu-refcount.h>
64 #include <linux/part_stat.h>
65
66 #include <trace/events/block.h>
67 #include "md.h"
68 #include "md-bitmap.h"
69 #include "md-cluster.h"
70
71 /* pers_list is a list of registered personalities protected
72  * by pers_lock.
73  * pers_lock also serves to protect accesses to
74  * mddev->thread when the mutex cannot be held.
75  */
76 static LIST_HEAD(pers_list);
77 static DEFINE_SPINLOCK(pers_lock);
78
79 static struct kobj_type md_ktype;
80
81 struct md_cluster_operations *md_cluster_ops;
82 EXPORT_SYMBOL(md_cluster_ops);
83 static struct module *md_cluster_mod;
84
85 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
86 static struct workqueue_struct *md_wq;
87 static struct workqueue_struct *md_misc_wq;
88 static struct workqueue_struct *md_rdev_misc_wq;
89
90 static int remove_and_add_spares(struct mddev *mddev,
91                                  struct md_rdev *this);
92 static void mddev_detach(struct mddev *mddev);
93
94 /*
95  * Default number of read corrections we'll attempt on an rdev
96  * before ejecting it from the array. We divide the read error
97  * count by 2 for every hour elapsed between read errors.
98  */
99 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
100 /* Default safemode delay: 200 msec */
101 #define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
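/* i.e. 200 ms converted to jiffies; the +1 makes up for the integer division rounding down. */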
102 /*
103  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
104  * is 1000 KB/sec, so the extra system load does not show up that much.
105  * Increase it if you want to have more _guaranteed_ speed. Note that
106  * the RAID driver will use the maximum available bandwidth if the IO
107  * subsystem is idle. There is also an 'absolute maximum' reconstruction
108  * speed limit - in case reconstruction slows down your system despite
109  * idle IO detection.
110  *
111  * you can change it via /proc/sys/dev/raid/speed_limit_{min,max}
112  * or /sys/block/mdX/md/sync_speed_{min,max}.
113  */
114
115 static int sysctl_speed_limit_min = 1000;
116 static int sysctl_speed_limit_max = 200000;
117 static inline int speed_min(struct mddev *mddev)
118 {
119         return mddev->sync_speed_min ?
120                 mddev->sync_speed_min : sysctl_speed_limit_min;
121 }
122
123 static inline int speed_max(struct mddev *mddev)
124 {
125         return mddev->sync_speed_max ?
126                 mddev->sync_speed_max : sysctl_speed_limit_max;
127 }
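/*
 * The two helpers above prefer the per-array sync_speed_{min,max} values
 * (settable via sysfs) and fall back to the global sysctl limits when
 * those are zero.
 */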
128
129 static void rdev_uninit_serial(struct md_rdev *rdev)
130 {
131         if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
132                 return;
133
134         kvfree(rdev->serial);
135         rdev->serial = NULL;
136 }
137
138 static void rdevs_uninit_serial(struct mddev *mddev)
139 {
140         struct md_rdev *rdev;
141
142         rdev_for_each(rdev, mddev)
143                 rdev_uninit_serial(rdev);
144 }
145
146 static int rdev_init_serial(struct md_rdev *rdev)
147 {
148         /* serial_nums equals BARRIER_BUCKETS_NR */
149         int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
150         struct serial_in_rdev *serial = NULL;
151
152         if (test_bit(CollisionCheck, &rdev->flags))
153                 return 0;
154
155         serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
156                           GFP_KERNEL);
157         if (!serial)
158                 return -ENOMEM;
159
160         for (i = 0; i < serial_nums; i++) {
161                 struct serial_in_rdev *serial_tmp = &serial[i];
162
163                 spin_lock_init(&serial_tmp->serial_lock);
164                 serial_tmp->serial_rb = RB_ROOT_CACHED;
165                 init_waitqueue_head(&serial_tmp->serial_io_wait);
166         }
167
168         rdev->serial = serial;
169         set_bit(CollisionCheck, &rdev->flags);
170
171         return 0;
172 }
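/*
 * rdev_init_serial() above allocates one serial_in_rdev bucket per barrier
 * bucket (PAGE_SIZE / sizeof(atomic_t) of them), each with its own lock,
 * cached rb-tree root and wait queue, and marks the rdev with CollisionCheck.
 */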
173
174 static int rdevs_init_serial(struct mddev *mddev)
175 {
176         struct md_rdev *rdev;
177         int ret = 0;
178
179         rdev_for_each(rdev, mddev) {
180                 ret = rdev_init_serial(rdev);
181                 if (ret)
182                         break;
183         }
184
185         /* Free all resources if the pool does not exist */
186         if (ret && !mddev->serial_info_pool)
187                 rdevs_uninit_serial(mddev);
188
189         return ret;
190 }
191
192 /*
193  * rdev needs to enable the serial mechanism if it meets both conditions:
194  * 1. it is a multi-queue device flagged with writemostly.
195  * 2. the write-behind mode is enabled.
196  */
197 static int rdev_need_serial(struct md_rdev *rdev)
198 {
199         return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
200                 rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
201                 test_bit(WriteMostly, &rdev->flags));
202 }
203
204 /*
205  * Init resources for rdev(s), then create serial_info_pool if:
206  * 1. rdev is the first device which returns true from rdev_need_serial.
207  * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
208  */
209 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
210                               bool is_suspend)
211 {
212         int ret = 0;
213
214         if (rdev && !rdev_need_serial(rdev) &&
215             !test_bit(CollisionCheck, &rdev->flags))
216                 return;
217
218         if (!is_suspend)
219                 mddev_suspend(mddev);
220
221         if (!rdev)
222                 ret = rdevs_init_serial(mddev);
223         else
224                 ret = rdev_init_serial(rdev);
225         if (ret)
226                 goto abort;
227
228         if (mddev->serial_info_pool == NULL) {
229                 /*
230                  * already in memalloc noio context by
231                  * mddev_suspend()
232                  */
233                 mddev->serial_info_pool =
234                         mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
235                                                 sizeof(struct serial_info));
236                 if (!mddev->serial_info_pool) {
237                         rdevs_uninit_serial(mddev);
238                         pr_err("can't alloc memory pool for serialization\n");
239                 }
240         }
241
242 abort:
243         if (!is_suspend)
244                 mddev_resume(mddev);
245 }
246
247 /*
248  * Free resources from rdev(s), and destroy serial_info_pool under these conditions:
249  * 1. rdev is the last device flagged with CollisionCheck.
250  * 2. the bitmap is destroyed while the policy is not enabled.
251  * 3. when disabling the policy, the pool is destroyed only when no rdev needs it.
252  */
253 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
254                                bool is_suspend)
255 {
256         if (rdev && !test_bit(CollisionCheck, &rdev->flags))
257                 return;
258
259         if (mddev->serial_info_pool) {
260                 struct md_rdev *temp;
261                 int num = 0; /* used to track if other rdevs need the pool */
262
263                 if (!is_suspend)
264                         mddev_suspend(mddev);
265                 rdev_for_each(temp, mddev) {
266                         if (!rdev) {
267                                 if (!mddev->serialize_policy ||
268                                     !rdev_need_serial(temp))
269                                         rdev_uninit_serial(temp);
270                                 else
271                                         num++;
272                         } else if (temp != rdev &&
273                                    test_bit(CollisionCheck, &temp->flags))
274                                 num++;
275                 }
276
277                 if (rdev)
278                         rdev_uninit_serial(rdev);
279
280                 if (num)
281                         pr_info("The mempool could be used by other devices\n");
282                 else {
283                         mempool_destroy(mddev->serial_info_pool);
284                         mddev->serial_info_pool = NULL;
285                 }
286                 if (!is_suspend)
287                         mddev_resume(mddev);
288         }
289 }
290
291 static struct ctl_table_header *raid_table_header;
292
293 static struct ctl_table raid_table[] = {
294         {
295                 .procname       = "speed_limit_min",
296                 .data           = &sysctl_speed_limit_min,
297                 .maxlen         = sizeof(int),
298                 .mode           = S_IRUGO|S_IWUSR,
299                 .proc_handler   = proc_dointvec,
300         },
301         {
302                 .procname       = "speed_limit_max",
303                 .data           = &sysctl_speed_limit_max,
304                 .maxlen         = sizeof(int),
305                 .mode           = S_IRUGO|S_IWUSR,
306                 .proc_handler   = proc_dointvec,
307         },
308         { }
309 };
310
311 static struct ctl_table raid_dir_table[] = {
312         {
313                 .procname       = "raid",
314                 .maxlen         = 0,
315                 .mode           = S_IRUGO|S_IXUGO,
316                 .child          = raid_table,
317         },
318         { }
319 };
320
321 static struct ctl_table raid_root_table[] = {
322         {
323                 .procname       = "dev",
324                 .maxlen         = 0,
325                 .mode           = 0555,
326                 .child          = raid_dir_table,
327         },
328         {  }
329 };
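/*
 * The nested tables above describe the /proc/sys/dev/raid/speed_limit_min
 * and speed_limit_max entries; raid_table_header is the handle used when
 * they are registered.
 */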
330
331 static int start_readonly;
332
333 /*
334  * The original mechanism for creating an md device is to create
335  * a device node in /dev and to open it.  This causes races with device-close.
336  * The preferred method is to write to the "new_array" module parameter.
337  * This can avoid races.
338  * Setting create_on_open to false disables the original mechanism
339  * so all the races disappear.
340  */
341 static bool create_on_open = true;
342
343 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
344                             struct mddev *mddev)
345 {
346         if (!mddev || !bioset_initialized(&mddev->bio_set))
347                 return bio_alloc(gfp_mask, nr_iovecs);
348
349         return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
350 }
351 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
352
353 static struct bio *md_bio_alloc_sync(struct mddev *mddev)
354 {
355         if (!mddev || !bioset_initialized(&mddev->sync_set))
356                 return bio_alloc(GFP_NOIO, 1);
357
358         return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
359 }
360
361 /*
362  * We have a system wide 'event count' that is incremented
363  * on any 'interesting' event, and readers of /proc/mdstat
364  * can use 'poll' or 'select' to find out when the event
365  * count increases.
366  *
367  * Events are:
368  *  start array, stop array, error, add device, remove device,
369  *  start build, activate spare
370  */
371 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
372 static atomic_t md_event_count;
373 void md_new_event(struct mddev *mddev)
374 {
375         atomic_inc(&md_event_count);
376         wake_up(&md_event_waiters);
377 }
378 EXPORT_SYMBOL_GPL(md_new_event);
379
380 /*
381  * Enables iteration over all existing md arrays.
382  * all_mddevs_lock protects this list.
383  */
384 static LIST_HEAD(all_mddevs);
385 static DEFINE_SPINLOCK(all_mddevs_lock);
386
387 /*
388  * iterates through all used mddevs in the system.
389  * We take care to grab the all_mddevs_lock whenever navigating
390  * the list, and to always hold a refcount when unlocked.
391  * Any code which breaks out of this loop still owns
392  * a reference to the current mddev and must mddev_put it.
393  */
394 #define for_each_mddev(_mddev,_tmp)                                     \
395                                                                         \
396         for (({ spin_lock(&all_mddevs_lock);                            \
397                 _tmp = all_mddevs.next;                                 \
398                 _mddev = NULL;});                                       \
399              ({ if (_tmp != &all_mddevs)                                \
400                         mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
401                 spin_unlock(&all_mddevs_lock);                          \
402                 if (_mddev) mddev_put(_mddev);                          \
403                 _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
404                 _tmp != &all_mddevs;});                                 \
405              ({ spin_lock(&all_mddevs_lock);                            \
406                 _tmp = _tmp->next;})                                    \
407                 )
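/*
 * The macro above holds all_mddevs_lock only while stepping through the
 * list: it takes a reference on the next mddev before dropping the lock
 * and puts the reference on the previous one, so the loop body runs
 * unlocked but always with a reference held.
 */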
408
409 /* Rather than calling directly into the personality make_request function,
410  * IO requests come here first so that we can check if the device is
411  * being suspended pending a reconfiguration.
412  * We hold a refcount over the call to ->make_request.  By the time that
413  * call has finished, the bio has been linked into some internal structure
414  * and so is visible to ->quiesce(), so we don't need the refcount any more.
415  */
416 static bool is_suspended(struct mddev *mddev, struct bio *bio)
417 {
418         if (mddev->suspended)
419                 return true;
420         if (bio_data_dir(bio) != WRITE)
421                 return false;
422         if (mddev->suspend_lo >= mddev->suspend_hi)
423                 return false;
424         if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
425                 return false;
426         if (bio_end_sector(bio) < mddev->suspend_lo)
427                 return false;
428         return true;
429 }
430
431 void md_handle_request(struct mddev *mddev, struct bio *bio)
432 {
433 check_suspended:
434         rcu_read_lock();
435         if (is_suspended(mddev, bio)) {
436                 DEFINE_WAIT(__wait);
437                 for (;;) {
438                         prepare_to_wait(&mddev->sb_wait, &__wait,
439                                         TASK_UNINTERRUPTIBLE);
440                         if (!is_suspended(mddev, bio))
441                                 break;
442                         rcu_read_unlock();
443                         schedule();
444                         rcu_read_lock();
445                 }
446                 finish_wait(&mddev->sb_wait, &__wait);
447         }
448         atomic_inc(&mddev->active_io);
449         rcu_read_unlock();
450
451         if (!mddev->pers->make_request(mddev, bio)) {
452                 atomic_dec(&mddev->active_io);
453                 wake_up(&mddev->sb_wait);
454                 goto check_suspended;
455         }
456
457         if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
458                 wake_up(&mddev->sb_wait);
459 }
460 EXPORT_SYMBOL(md_handle_request);
461
462 static blk_qc_t md_submit_bio(struct bio *bio)
463 {
464         const int rw = bio_data_dir(bio);
465         const int sgrp = op_stat_group(bio_op(bio));
466         struct mddev *mddev = bio->bi_disk->private_data;
467         unsigned int sectors;
468
469         if (mddev == NULL || mddev->pers == NULL) {
470                 bio_io_error(bio);
471                 return BLK_QC_T_NONE;
472         }
473
474         if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
475                 bio_io_error(bio);
476                 return BLK_QC_T_NONE;
477         }
478
479         blk_queue_split(&bio);
480
481         if (mddev->ro == 1 && unlikely(rw == WRITE)) {
482                 if (bio_sectors(bio) != 0)
483                         bio->bi_status = BLK_STS_IOERR;
484                 bio_endio(bio);
485                 return BLK_QC_T_NONE;
486         }
487
488         /*
489          * save the sectors now since our bio can
490          * go away inside make_request
491          */
492         sectors = bio_sectors(bio);
493         /* bio could be mergeable after passing to underlayer */
494         bio->bi_opf &= ~REQ_NOMERGE;
495
496         md_handle_request(mddev, bio);
497
498         part_stat_lock();
499         part_stat_inc(&mddev->gendisk->part0, ios[sgrp]);
500         part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors);
501         part_stat_unlock();
502
503         return BLK_QC_T_NONE;
504 }
505
506 /* mddev_suspend makes sure no new requests are submitted
507  * to the device, and that any requests that have been submitted
508  * are completely handled.
509  * Once mddev_detach() is called and completes, the module will be
510  * completely unused.
511  */
512 void mddev_suspend(struct mddev *mddev)
513 {
514         WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
515         lockdep_assert_held(&mddev->reconfig_mutex);
516         if (mddev->suspended++)
517                 return;
518         synchronize_rcu();
519         wake_up(&mddev->sb_wait);
520         set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
521         smp_mb__after_atomic();
522         wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
523         mddev->pers->quiesce(mddev, 1);
524         clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
525         wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
526
527         del_timer_sync(&mddev->safemode_timer);
528         /* restrict memory-reclaim I/O while the raid array is suspended */
529         mddev->noio_flag = memalloc_noio_save();
530 }
531 EXPORT_SYMBOL_GPL(mddev_suspend);
532
533 void mddev_resume(struct mddev *mddev)
534 {
535         /* entered the memalloc noio scope from mddev_suspend() */
536         memalloc_noio_restore(mddev->noio_flag);
537         lockdep_assert_held(&mddev->reconfig_mutex);
538         if (--mddev->suspended)
539                 return;
540         wake_up(&mddev->sb_wait);
541         mddev->pers->quiesce(mddev, 0);
542
543         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
544         md_wakeup_thread(mddev->thread);
545         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
546 }
547 EXPORT_SYMBOL_GPL(mddev_resume);
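/*
 * mddev_suspend()/mddev_resume() nest: ->suspended is a counter, so only
 * the outermost suspend quiesces the array and only the matching final
 * resume un-quiesces it and kicks the recovery/sync threads.
 */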
548
549 /*
550  * Generic flush handling for md
551  */
552
553 static void md_end_flush(struct bio *bio)
554 {
555         struct md_rdev *rdev = bio->bi_private;
556         struct mddev *mddev = rdev->mddev;
557
558         bio_put(bio);
559
560         rdev_dec_pending(rdev, mddev);
561
562         if (atomic_dec_and_test(&mddev->flush_pending)) {
563                 /* The pre-request flush has finished */
564                 queue_work(md_wq, &mddev->flush_work);
565         }
566 }
567
568 static void md_submit_flush_data(struct work_struct *ws);
569
570 static void submit_flushes(struct work_struct *ws)
571 {
572         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
573         struct md_rdev *rdev;
574
575         mddev->start_flush = ktime_get_boottime();
576         INIT_WORK(&mddev->flush_work, md_submit_flush_data);
577         atomic_set(&mddev->flush_pending, 1);
578         rcu_read_lock();
579         rdev_for_each_rcu(rdev, mddev)
580                 if (rdev->raid_disk >= 0 &&
581                     !test_bit(Faulty, &rdev->flags)) {
582                         /* Take two references: one is dropped
583                          * when the request finishes, the other after
584                          * we retake the rcu_read_lock
585                          */
586                         struct bio *bi;
587                         atomic_inc(&rdev->nr_pending);
588                         atomic_inc(&rdev->nr_pending);
589                         rcu_read_unlock();
590                         bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
591                         bi->bi_end_io = md_end_flush;
592                         bi->bi_private = rdev;
593                         bio_set_dev(bi, rdev->bdev);
594                         bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
595                         atomic_inc(&mddev->flush_pending);
596                         submit_bio(bi);
597                         rcu_read_lock();
598                         rdev_dec_pending(rdev, mddev);
599                 }
600         rcu_read_unlock();
601         if (atomic_dec_and_test(&mddev->flush_pending))
602                 queue_work(md_wq, &mddev->flush_work);
603 }
604
605 static void md_submit_flush_data(struct work_struct *ws)
606 {
607         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
608         struct bio *bio = mddev->flush_bio;
609
610         /*
611          * must reset flush_bio before calling into md_handle_request to avoid a
612          * deadlock: other bios that passed the md_handle_request suspend check
613          * could be waiting for this flush to finish, while the md_handle_request
614          * call below could wait for those bios because of the suspend check
615          */
616         spin_lock_irq(&mddev->lock);
617         mddev->last_flush = mddev->start_flush;
618         mddev->flush_bio = NULL;
619         spin_unlock_irq(&mddev->lock);
620         wake_up(&mddev->sb_wait);
621
622         if (bio->bi_iter.bi_size == 0) {
623                 /* an empty barrier - all done */
624                 bio_endio(bio);
625         } else {
626                 bio->bi_opf &= ~REQ_PREFLUSH;
627                 md_handle_request(mddev, bio);
628         }
629 }
630
631 /*
632  * Manages consolidation of flushes and submitting any flushes needed for
633  * a bio with REQ_PREFLUSH.  Returns true if the bio is finished or is
634  * being finished in another context.  Returns false if the flushing is
635  * complete but still needs the I/O portion of the bio to be processed.
636  */
637 bool md_flush_request(struct mddev *mddev, struct bio *bio)
638 {
639         ktime_t start = ktime_get_boottime();
640         spin_lock_irq(&mddev->lock);
641         wait_event_lock_irq(mddev->sb_wait,
642                             !mddev->flush_bio ||
643                             ktime_after(mddev->last_flush, start),
644                             mddev->lock);
645         if (!ktime_after(mddev->last_flush, start)) {
646                 WARN_ON(mddev->flush_bio);
647                 mddev->flush_bio = bio;
648                 bio = NULL;
649         }
650         spin_unlock_irq(&mddev->lock);
651
652         if (!bio) {
653                 INIT_WORK(&mddev->flush_work, submit_flushes);
654                 queue_work(md_wq, &mddev->flush_work);
655         } else {
656                 /* flush was performed for some other bio while we waited. */
657                 if (bio->bi_iter.bi_size == 0)
658                         /* an empty barrier - all done */
659                         bio_endio(bio);
660                 else {
661                         bio->bi_opf &= ~REQ_PREFLUSH;
662                         return false;
663                 }
664         }
665         return true;
666 }
667 EXPORT_SYMBOL(md_flush_request);
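/*
 * md_flush_request() consolidates flushes: if a flush that started after
 * this bio arrived has already completed (last_flush is after start), the
 * device flush can be skipped; an empty barrier is then ended immediately
 * and a bio with data is returned to the caller (false) for normal handling.
 * Otherwise the bio becomes mddev->flush_bio and submit_flushes() is queued.
 */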
668
669 static inline struct mddev *mddev_get(struct mddev *mddev)
670 {
671         atomic_inc(&mddev->active);
672         return mddev;
673 }
674
675 static void mddev_delayed_delete(struct work_struct *ws);
676
677 static void mddev_put(struct mddev *mddev)
678 {
679         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
680                 return;
681         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
682             mddev->ctime == 0 && !mddev->hold_active) {
683                 /* Array is not configured at all, and not held active,
684                  * so destroy it */
685                 list_del_init(&mddev->all_mddevs);
686
687                 /*
688                  * Call queue_work inside the spinlock so that
689                  * flush_workqueue() after mddev_find will succeed in waiting
690                  * for the work to be done.
691                  */
692                 INIT_WORK(&mddev->del_work, mddev_delayed_delete);
693                 queue_work(md_misc_wq, &mddev->del_work);
694         }
695         spin_unlock(&all_mddevs_lock);
696 }
697
698 static void md_safemode_timeout(struct timer_list *t);
699
700 void mddev_init(struct mddev *mddev)
701 {
702         kobject_init(&mddev->kobj, &md_ktype);
703         mutex_init(&mddev->open_mutex);
704         mutex_init(&mddev->reconfig_mutex);
705         mutex_init(&mddev->bitmap_info.mutex);
706         INIT_LIST_HEAD(&mddev->disks);
707         INIT_LIST_HEAD(&mddev->all_mddevs);
708         timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
709         atomic_set(&mddev->active, 1);
710         atomic_set(&mddev->openers, 0);
711         atomic_set(&mddev->active_io, 0);
712         spin_lock_init(&mddev->lock);
713         atomic_set(&mddev->flush_pending, 0);
714         init_waitqueue_head(&mddev->sb_wait);
715         init_waitqueue_head(&mddev->recovery_wait);
716         mddev->reshape_position = MaxSector;
717         mddev->reshape_backwards = 0;
718         mddev->last_sync_action = "none";
719         mddev->resync_min = 0;
720         mddev->resync_max = MaxSector;
721         mddev->level = LEVEL_NONE;
722 }
723 EXPORT_SYMBOL_GPL(mddev_init);
724
725 static struct mddev *mddev_find_locked(dev_t unit)
726 {
727         struct mddev *mddev;
728
729         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
730                 if (mddev->unit == unit)
731                         return mddev;
732
733         return NULL;
734 }
735
736 static struct mddev *mddev_find(dev_t unit)
737 {
738         struct mddev *mddev;
739
740         if (MAJOR(unit) != MD_MAJOR)
741                 unit &= ~((1 << MdpMinorShift) - 1);
742
743         spin_lock(&all_mddevs_lock);
744         mddev = mddev_find_locked(unit);
745         if (mddev)
746                 mddev_get(mddev);
747         spin_unlock(&all_mddevs_lock);
748
749         return mddev;
750 }
751
752 static struct mddev *mddev_find_or_alloc(dev_t unit)
753 {
754         struct mddev *mddev, *new = NULL;
755
756         if (unit && MAJOR(unit) != MD_MAJOR)
757                 unit &= ~((1<<MdpMinorShift)-1);
758
759  retry:
760         spin_lock(&all_mddevs_lock);
761
762         if (unit) {
763                 mddev = mddev_find_locked(unit);
764                 if (mddev) {
765                         mddev_get(mddev);
766                         spin_unlock(&all_mddevs_lock);
767                         kfree(new);
768                         return mddev;
769                 }
770
771                 if (new) {
772                         list_add(&new->all_mddevs, &all_mddevs);
773                         spin_unlock(&all_mddevs_lock);
774                         new->hold_active = UNTIL_IOCTL;
775                         return new;
776                 }
777         } else if (new) {
778                 /* find an unused unit number */
779                 static int next_minor = 512;
780                 int start = next_minor;
781                 int is_free = 0;
782                 int dev = 0;
783                 while (!is_free) {
784                         dev = MKDEV(MD_MAJOR, next_minor);
785                         next_minor++;
786                         if (next_minor > MINORMASK)
787                                 next_minor = 0;
788                         if (next_minor == start) {
789                                 /* Oh dear, all in use. */
790                                 spin_unlock(&all_mddevs_lock);
791                                 kfree(new);
792                                 return NULL;
793                         }
794
795                         is_free = !mddev_find_locked(dev);
796                 }
797                 new->unit = dev;
798                 new->md_minor = MINOR(dev);
799                 new->hold_active = UNTIL_STOP;
800                 list_add(&new->all_mddevs, &all_mddevs);
801                 spin_unlock(&all_mddevs_lock);
802                 return new;
803         }
804         spin_unlock(&all_mddevs_lock);
805
806         new = kzalloc(sizeof(*new), GFP_KERNEL);
807         if (!new)
808                 return NULL;
809
810         new->unit = unit;
811         if (MAJOR(unit) == MD_MAJOR)
812                 new->md_minor = MINOR(unit);
813         else
814                 new->md_minor = MINOR(unit) >> MdpMinorShift;
815
816         mddev_init(new);
817
818         goto retry;
819 }
820
821 static struct attribute_group md_redundancy_group;
822
823 void mddev_unlock(struct mddev *mddev)
824 {
825         if (mddev->to_remove) {
826                 /* These cannot be removed under reconfig_mutex as
827                  * an access to the files will try to take reconfig_mutex
828                  * while holding the file unremovable, which leads to
829                  * a deadlock.
830                  * So set sysfs_active while the removal is happening,
831                  * and anything else which might set ->to_remove or
832                  * otherwise change the sysfs namespace will fail with
833                  * -EBUSY if sysfs_active is still set.
834                  * We set sysfs_active under reconfig_mutex and elsewhere
835                  * test it under the same mutex to ensure its correct value
836                  * is seen.
837                  */
838                 struct attribute_group *to_remove = mddev->to_remove;
839                 mddev->to_remove = NULL;
840                 mddev->sysfs_active = 1;
841                 mutex_unlock(&mddev->reconfig_mutex);
842
843                 if (mddev->kobj.sd) {
844                         if (to_remove != &md_redundancy_group)
845                                 sysfs_remove_group(&mddev->kobj, to_remove);
846                         if (mddev->pers == NULL ||
847                             mddev->pers->sync_request == NULL) {
848                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
849                                 if (mddev->sysfs_action)
850                                         sysfs_put(mddev->sysfs_action);
851                                 if (mddev->sysfs_completed)
852                                         sysfs_put(mddev->sysfs_completed);
853                                 if (mddev->sysfs_degraded)
854                                         sysfs_put(mddev->sysfs_degraded);
855                                 mddev->sysfs_action = NULL;
856                                 mddev->sysfs_completed = NULL;
857                                 mddev->sysfs_degraded = NULL;
858                         }
859                 }
860                 mddev->sysfs_active = 0;
861         } else
862                 mutex_unlock(&mddev->reconfig_mutex);
863
864         /* As we've dropped the mutex we need a spinlock to
865          * make sure the thread doesn't disappear
866          */
867         spin_lock(&pers_lock);
868         md_wakeup_thread(mddev->thread);
869         wake_up(&mddev->sb_wait);
870         spin_unlock(&pers_lock);
871 }
872 EXPORT_SYMBOL_GPL(mddev_unlock);
873
874 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
875 {
876         struct md_rdev *rdev;
877
878         rdev_for_each_rcu(rdev, mddev)
879                 if (rdev->desc_nr == nr)
880                         return rdev;
881
882         return NULL;
883 }
884 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
885
886 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
887 {
888         struct md_rdev *rdev;
889
890         rdev_for_each(rdev, mddev)
891                 if (rdev->bdev->bd_dev == dev)
892                         return rdev;
893
894         return NULL;
895 }
896
897 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
898 {
899         struct md_rdev *rdev;
900
901         rdev_for_each_rcu(rdev, mddev)
902                 if (rdev->bdev->bd_dev == dev)
903                         return rdev;
904
905         return NULL;
906 }
907 EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
908
909 static struct md_personality *find_pers(int level, char *clevel)
910 {
911         struct md_personality *pers;
912         list_for_each_entry(pers, &pers_list, list) {
913                 if (level != LEVEL_NONE && pers->level == level)
914                         return pers;
915                 if (strcmp(pers->name, clevel)==0)
916                         return pers;
917         }
918         return NULL;
919 }
920
921 /* return the offset of the super block in 512-byte sectors */
922 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
923 {
924         sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
925         return MD_NEW_SIZE_SECTORS(num_sectors);
926 }
927
928 static int alloc_disk_sb(struct md_rdev *rdev)
929 {
930         rdev->sb_page = alloc_page(GFP_KERNEL);
931         if (!rdev->sb_page)
932                 return -ENOMEM;
933         return 0;
934 }
935
936 void md_rdev_clear(struct md_rdev *rdev)
937 {
938         if (rdev->sb_page) {
939                 put_page(rdev->sb_page);
940                 rdev->sb_loaded = 0;
941                 rdev->sb_page = NULL;
942                 rdev->sb_start = 0;
943                 rdev->sectors = 0;
944         }
945         if (rdev->bb_page) {
946                 put_page(rdev->bb_page);
947                 rdev->bb_page = NULL;
948         }
949         badblocks_exit(&rdev->badblocks);
950 }
951 EXPORT_SYMBOL_GPL(md_rdev_clear);
952
953 static void super_written(struct bio *bio)
954 {
955         struct md_rdev *rdev = bio->bi_private;
956         struct mddev *mddev = rdev->mddev;
957
958         if (bio->bi_status) {
959                 pr_err("md: %s gets error=%d\n", __func__,
960                        blk_status_to_errno(bio->bi_status));
961                 md_error(mddev, rdev);
962                 if (!test_bit(Faulty, &rdev->flags)
963                     && (bio->bi_opf & MD_FAILFAST)) {
964                         set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
965                         set_bit(LastDev, &rdev->flags);
966                 }
967         } else
968                 clear_bit(LastDev, &rdev->flags);
969
970         bio_put(bio);
971
972         rdev_dec_pending(rdev, mddev);
973
974         if (atomic_dec_and_test(&mddev->pending_writes))
975                 wake_up(&mddev->sb_wait);
976 }
977
978 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
979                    sector_t sector, int size, struct page *page)
980 {
981         /* write first size bytes of page to sector of rdev
982          * Increment mddev->pending_writes before returning
983          * and decrement it on completion, waking up sb_wait
984          * if zero is reached.
985          * If an error occurred, call md_error
986          */
987         struct bio *bio;
988         int ff = 0;
989
990         if (!page)
991                 return;
992
993         if (test_bit(Faulty, &rdev->flags))
994                 return;
995
996         bio = md_bio_alloc_sync(mddev);
997
998         atomic_inc(&rdev->nr_pending);
999
1000         bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
1001         bio->bi_iter.bi_sector = sector;
1002         bio_add_page(bio, page, size, 0);
1003         bio->bi_private = rdev;
1004         bio->bi_end_io = super_written;
1005
1006         if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
1007             test_bit(FailFast, &rdev->flags) &&
1008             !test_bit(LastDev, &rdev->flags))
1009                 ff = MD_FAILFAST;
1010         bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
1011
1012         atomic_inc(&mddev->pending_writes);
1013         submit_bio(bio);
1014 }
1015
1016 int md_super_wait(struct mddev *mddev)
1017 {
1018         /* wait for all superblock writes that were scheduled to complete */
1019         wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
1020         if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
1021                 return -EAGAIN;
1022         return 0;
1023 }
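/*
 * md_super_wait() returns -EAGAIN when a failfast superblock write failed
 * (super_written() set MD_SB_NEED_REWRITE) so that the caller can retry
 * the write without MD_FAILFAST.
 */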
1024
1025 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
1026                  struct page *page, int op, int op_flags, bool metadata_op)
1027 {
1028         struct bio *bio = md_bio_alloc_sync(rdev->mddev);
1029         int ret;
1030
1031         if (metadata_op && rdev->meta_bdev)
1032                 bio_set_dev(bio, rdev->meta_bdev);
1033         else
1034                 bio_set_dev(bio, rdev->bdev);
1035         bio_set_op_attrs(bio, op, op_flags);
1036         if (metadata_op)
1037                 bio->bi_iter.bi_sector = sector + rdev->sb_start;
1038         else if (rdev->mddev->reshape_position != MaxSector &&
1039                  (rdev->mddev->reshape_backwards ==
1040                   (sector >= rdev->mddev->reshape_position)))
1041                 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
1042         else
1043                 bio->bi_iter.bi_sector = sector + rdev->data_offset;
1044         bio_add_page(bio, page, size, 0);
1045
1046         submit_bio_wait(bio);
1047
1048         ret = !bio->bi_status;
1049         bio_put(bio);
1050         return ret;
1051 }
1052 EXPORT_SYMBOL_GPL(sync_page_io);
1053
1054 static int read_disk_sb(struct md_rdev *rdev, int size)
1055 {
1056         char b[BDEVNAME_SIZE];
1057
1058         if (rdev->sb_loaded)
1059                 return 0;
1060
1061         if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
1062                 goto fail;
1063         rdev->sb_loaded = 1;
1064         return 0;
1065
1066 fail:
1067         pr_err("md: disabled device %s, could not read superblock.\n",
1068                bdevname(rdev->bdev,b));
1069         return -EINVAL;
1070 }
1071
1072 static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
1073 {
1074         return  sb1->set_uuid0 == sb2->set_uuid0 &&
1075                 sb1->set_uuid1 == sb2->set_uuid1 &&
1076                 sb1->set_uuid2 == sb2->set_uuid2 &&
1077                 sb1->set_uuid3 == sb2->set_uuid3;
1078 }
1079
1080 static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
1081 {
1082         int ret;
1083         mdp_super_t *tmp1, *tmp2;
1084
1085         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
1086         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
1087
1088         if (!tmp1 || !tmp2) {
1089                 ret = 0;
1090                 goto abort;
1091         }
1092
1093         *tmp1 = *sb1;
1094         *tmp2 = *sb2;
1095
1096         /*
1097          * nr_disks is not constant
1098          */
1099         tmp1->nr_disks = 0;
1100         tmp2->nr_disks = 0;
1101
1102         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
1103 abort:
1104         kfree(tmp1);
1105         kfree(tmp2);
1106         return ret;
1107 }
1108
1109 static u32 md_csum_fold(u32 csum)
1110 {
1111         csum = (csum & 0xffff) + (csum >> 16);
1112         return (csum & 0xffff) + (csum >> 16);
1113 }
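/*
 * md_csum_fold() folds a 32-bit sum into 16 bits with end-around carry;
 * two folding rounds are enough to absorb any carry out of the first.
 */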
1114
1115 static unsigned int calc_sb_csum(mdp_super_t *sb)
1116 {
1117         u64 newcsum = 0;
1118         u32 *sb32 = (u32*)sb;
1119         int i;
1120         unsigned int disk_csum, csum;
1121
1122         disk_csum = sb->sb_csum;
1123         sb->sb_csum = 0;
1124
1125         for (i = 0; i < MD_SB_BYTES/4 ; i++)
1126                 newcsum += sb32[i];
1127         csum = (newcsum & 0xffffffff) + (newcsum>>32);
1128
1129 #ifdef CONFIG_ALPHA
1130         /* This used to use csum_partial, which was wrong for several
1131          * reasons including that different results are returned on
1132          * different architectures.  It isn't critical that we get exactly
1133          * the same return value as before (we always csum_fold before
1134          * testing, and that removes any differences).  However as we
1135          * know that csum_partial always returned a 16bit value on
1136          * alphas, do a fold to maximise conformity to previous behaviour.
1137          */
1138         sb->sb_csum = md_csum_fold(disk_csum);
1139 #else
1140         sb->sb_csum = disk_csum;
1141 #endif
1142         return csum;
1143 }
1144
1145 /*
1146  * Handle superblock details.
1147  * We want to be able to handle multiple superblock formats
1148  * so we have a common interface to them all, and an array of
1149  * different handlers.
1150  * We rely on user-space to write the initial superblock, and support
1151  * reading and updating of superblocks.
1152  * Interface methods are:
1153  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
1154  *      loads and validates a superblock on dev.
1155  *      if refdev != NULL, compare superblocks on both devices
1156  *    Return:
1157  *      0 - dev has a superblock that is compatible with refdev
1158  *      1 - dev has a superblock that is compatible and newer than refdev
1159  *          so dev should be used as the refdev in future
1160  *     -EINVAL superblock incompatible or invalid
1161  *     -othererror e.g. -EIO
1162  *
1163  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
1164  *      Verify that dev is acceptable into mddev.
1165  *       The first time, mddev->raid_disks will be 0, and data from
1166  *       dev should be merged in.  Subsequent calls check that dev
1167  *       is new enough.  Return 0 or -EINVAL
1168  *
1169  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
1170  *     Update the superblock for rdev with data in mddev
1171  *     This does not write to disc.
1172  *
1173  */
1174
1175 struct super_type  {
1176         char                *name;
1177         struct module       *owner;
1178         int                 (*load_super)(struct md_rdev *rdev,
1179                                           struct md_rdev *refdev,
1180                                           int minor_version);
1181         int                 (*validate_super)(struct mddev *mddev,
1182                                               struct md_rdev *freshest,
1183                                               struct md_rdev *rdev);
1184         void                (*sync_super)(struct mddev *mddev,
1185                                           struct md_rdev *rdev);
1186         unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
1187                                                 sector_t num_sectors);
1188         int                 (*allow_new_offset)(struct md_rdev *rdev,
1189                                                 unsigned long long new_offset);
1190 };
1191
1192 /*
1193  * Check that the given mddev has no bitmap.
1194  *
1195  * This function is called from the run method of all personalities that do not
1196  * support bitmaps. It prints an error message and returns non-zero if mddev
1197  * has a bitmap. Otherwise, it returns 0.
1198  *
1199  */
1200 int md_check_no_bitmap(struct mddev *mddev)
1201 {
1202         if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
1203                 return 0;
1204         pr_warn("%s: bitmaps are not supported for %s\n",
1205                 mdname(mddev), mddev->pers->name);
1206         return 1;
1207 }
1208 EXPORT_SYMBOL(md_check_no_bitmap);
1209
1210 /*
1211  * load_super for 0.90.0
1212  */
1213 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1214 {
1215         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1216         mdp_super_t *sb;
1217         int ret;
1218         bool spare_disk = true;
1219
1220         /*
1221          * Calculate the position of the superblock (in 512-byte sectors);
1222          * it's at the end of the disk.
1223          *
1224          * It also happens to be a multiple of 4Kb.
1225          */
1226         rdev->sb_start = calc_dev_sboffset(rdev);
1227
1228         ret = read_disk_sb(rdev, MD_SB_BYTES);
1229         if (ret)
1230                 return ret;
1231
1232         ret = -EINVAL;
1233
1234         bdevname(rdev->bdev, b);
1235         sb = page_address(rdev->sb_page);
1236
1237         if (sb->md_magic != MD_SB_MAGIC) {
1238                 pr_warn("md: invalid raid superblock magic on %s\n", b);
1239                 goto abort;
1240         }
1241
1242         if (sb->major_version != 0 ||
1243             sb->minor_version < 90 ||
1244             sb->minor_version > 91) {
1245                 pr_warn("Bad version number %d.%d on %s\n",
1246                         sb->major_version, sb->minor_version, b);
1247                 goto abort;
1248         }
1249
1250         if (sb->raid_disks <= 0)
1251                 goto abort;
1252
1253         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
1254                 pr_warn("md: invalid superblock checksum on %s\n", b);
1255                 goto abort;
1256         }
1257
1258         rdev->preferred_minor = sb->md_minor;
1259         rdev->data_offset = 0;
1260         rdev->new_data_offset = 0;
1261         rdev->sb_size = MD_SB_BYTES;
1262         rdev->badblocks.shift = -1;
1263
1264         if (sb->level == LEVEL_MULTIPATH)
1265                 rdev->desc_nr = -1;
1266         else
1267                 rdev->desc_nr = sb->this_disk.number;
1268
1269         /* not spare disk, or LEVEL_MULTIPATH */
1270         if (sb->level == LEVEL_MULTIPATH ||
1271                 (rdev->desc_nr >= 0 &&
1272                  rdev->desc_nr < MD_SB_DISKS &&
1273                  sb->disks[rdev->desc_nr].state &
1274                  ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
1275                 spare_disk = false;
1276
1277         if (!refdev) {
1278                 if (!spare_disk)
1279                         ret = 1;
1280                 else
1281                         ret = 0;
1282         } else {
1283                 __u64 ev1, ev2;
1284                 mdp_super_t *refsb = page_address(refdev->sb_page);
1285                 if (!md_uuid_equal(refsb, sb)) {
1286                         pr_warn("md: %s has different UUID to %s\n",
1287                                 b, bdevname(refdev->bdev,b2));
1288                         goto abort;
1289                 }
1290                 if (!md_sb_equal(refsb, sb)) {
1291                         pr_warn("md: %s has same UUID but different superblock to %s\n",
1292                                 b, bdevname(refdev->bdev, b2));
1293                         goto abort;
1294                 }
1295                 ev1 = md_event(sb);
1296                 ev2 = md_event(refsb);
1297
1298                 if (!spare_disk && ev1 > ev2)
1299                         ret = 1;
1300                 else
1301                         ret = 0;
1302         }
1303         rdev->sectors = rdev->sb_start;
1304         /* Limit to 4TB as metadata cannot record more than that.
1305          * (not needed for Linear and RAID0 as metadata doesn't
1306          * record this size)
1307          */
1308         if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
1309                 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1310
1311         if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1312                 /* "this cannot possibly happen" ... */
1313                 ret = -EINVAL;
1314
1315  abort:
1316         return ret;
1317 }
1318
1319 /*
1320  * validate_super for 0.90.0
1321  * note: we are not using "freshest" for 0.9 superblock
1322  */
1323 static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
1324 {
1325         mdp_disk_t *desc;
1326         mdp_super_t *sb = page_address(rdev->sb_page);
1327         __u64 ev1 = md_event(sb);
1328
1329         rdev->raid_disk = -1;
1330         clear_bit(Faulty, &rdev->flags);
1331         clear_bit(In_sync, &rdev->flags);
1332         clear_bit(Bitmap_sync, &rdev->flags);
1333         clear_bit(WriteMostly, &rdev->flags);
1334
1335         if (mddev->raid_disks == 0) {
1336                 mddev->major_version = 0;
1337                 mddev->minor_version = sb->minor_version;
1338                 mddev->patch_version = sb->patch_version;
1339                 mddev->external = 0;
1340                 mddev->chunk_sectors = sb->chunk_size >> 9;
1341                 mddev->ctime = sb->ctime;
1342                 mddev->utime = sb->utime;
1343                 mddev->level = sb->level;
1344                 mddev->clevel[0] = 0;
1345                 mddev->layout = sb->layout;
1346                 mddev->raid_disks = sb->raid_disks;
1347                 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1348                 mddev->events = ev1;
1349                 mddev->bitmap_info.offset = 0;
1350                 mddev->bitmap_info.space = 0;
1351                 /* bitmap can use 60 K after the 4K superblocks */
1352                 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1353                 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1354                 mddev->reshape_backwards = 0;
1355
1356                 if (mddev->minor_version >= 91) {
1357                         mddev->reshape_position = sb->reshape_position;
1358                         mddev->delta_disks = sb->delta_disks;
1359                         mddev->new_level = sb->new_level;
1360                         mddev->new_layout = sb->new_layout;
1361                         mddev->new_chunk_sectors = sb->new_chunk >> 9;
1362                         if (mddev->delta_disks < 0)
1363                                 mddev->reshape_backwards = 1;
1364                 } else {
1365                         mddev->reshape_position = MaxSector;
1366                         mddev->delta_disks = 0;
1367                         mddev->new_level = mddev->level;
1368                         mddev->new_layout = mddev->layout;
1369                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1370                 }
1371                 if (mddev->level == 0)
1372                         mddev->layout = -1;
1373
1374                 if (sb->state & (1<<MD_SB_CLEAN))
1375                         mddev->recovery_cp = MaxSector;
1376                 else {
1377                         if (sb->events_hi == sb->cp_events_hi &&
1378                                 sb->events_lo == sb->cp_events_lo) {
1379                                 mddev->recovery_cp = sb->recovery_cp;
1380                         } else
1381                                 mddev->recovery_cp = 0;
1382                 }
1383
1384                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1385                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1386                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1387                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1388
1389                 mddev->max_disks = MD_SB_DISKS;
1390
1391                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1392                     mddev->bitmap_info.file == NULL) {
1393                         mddev->bitmap_info.offset =
1394                                 mddev->bitmap_info.default_offset;
1395                         mddev->bitmap_info.space =
1396                                 mddev->bitmap_info.default_space;
1397                 }
1398
1399         } else if (mddev->pers == NULL) {
1400                 /* Insist on good event counter while assembling, except
1401                  * for spares (which don't need an event count) */
1402                 ++ev1;
1403                 if (sb->disks[rdev->desc_nr].state & (
1404                             (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1405                         if (ev1 < mddev->events)
1406                                 return -EINVAL;
1407         } else if (mddev->bitmap) {
1408                 /* if adding to array with a bitmap, then we can accept an
1409                  * older device ... but not too old.
1410                  */
1411                 if (ev1 < mddev->bitmap->events_cleared)
1412                         return 0;
1413                 if (ev1 < mddev->events)
1414                         set_bit(Bitmap_sync, &rdev->flags);
1415         } else {
1416                 if (ev1 < mddev->events)
1417                         /* just a hot-add of a new device, leave raid_disk at -1 */
1418                         return 0;
1419         }
1420
1421         if (mddev->level != LEVEL_MULTIPATH) {
1422                 desc = sb->disks + rdev->desc_nr;
1423
1424                 if (desc->state & (1<<MD_DISK_FAULTY))
1425                         set_bit(Faulty, &rdev->flags);
1426                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1427                             desc->raid_disk < mddev->raid_disks */) {
1428                         set_bit(In_sync, &rdev->flags);
1429                         rdev->raid_disk = desc->raid_disk;
1430                         rdev->saved_raid_disk = desc->raid_disk;
1431                 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1432                         /* active but not in sync implies recovery up to
1433                          * reshape position.  We don't know exactly where
1434                          * that is, so set to zero for now */
1435                         if (mddev->minor_version >= 91) {
1436                                 rdev->recovery_offset = 0;
1437                                 rdev->raid_disk = desc->raid_disk;
1438                         }
1439                 }
1440                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1441                         set_bit(WriteMostly, &rdev->flags);
1442                 if (desc->state & (1<<MD_DISK_FAILFAST))
1443                         set_bit(FailFast, &rdev->flags);
1444         } else /* MULTIPATH are always insync */
1445                 set_bit(In_sync, &rdev->flags);
1446         return 0;
1447 }
1448
1449 /*
1450  * sync_super for 0.90.0
1451  */
1452 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1453 {
1454         mdp_super_t *sb;
1455         struct md_rdev *rdev2;
1456         int next_spare = mddev->raid_disks;
1457
1458         /* make rdev->sb match mddev data..
1459          *
1460          * 1/ zero out disks
1461          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1462          * 3/ any empty disks < next_spare become removed
1463          *
1464          * disks[0] gets initialised to REMOVED because
1465          * we cannot be sure from other fields if it has
1466          * been initialised or not.
1467          */
1468         int i;
1469         int active=0, working=0,failed=0,spare=0,nr_disks=0;
1470
1471         rdev->sb_size = MD_SB_BYTES;
1472
1473         sb = page_address(rdev->sb_page);
1474
1475         memset(sb, 0, sizeof(*sb));
1476
1477         sb->md_magic = MD_SB_MAGIC;
1478         sb->major_version = mddev->major_version;
1479         sb->patch_version = mddev->patch_version;
1480         sb->gvalid_words  = 0; /* ignored */
1481         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1482         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1483         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1484         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1485
1486         sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1487         sb->level = mddev->level;
1488         sb->size = mddev->dev_sectors / 2;
1489         sb->raid_disks = mddev->raid_disks;
1490         sb->md_minor = mddev->md_minor;
1491         sb->not_persistent = 0;
1492         sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1493         sb->state = 0;
1494         sb->events_hi = (mddev->events>>32);
1495         sb->events_lo = (u32)mddev->events;
1496
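             /* An active reshape is recorded by bumping the 0.90 minor version
              * to 91 and filling in the reshape fields below; a plain v90
              * superblock has no fields for them.
              */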
1497         if (mddev->reshape_position == MaxSector)
1498                 sb->minor_version = 90;
1499         else {
1500                 sb->minor_version = 91;
1501                 sb->reshape_position = mddev->reshape_position;
1502                 sb->new_level = mddev->new_level;
1503                 sb->delta_disks = mddev->delta_disks;
1504                 sb->new_layout = mddev->new_layout;
1505                 sb->new_chunk = mddev->new_chunk_sectors << 9;
1506         }
1507         mddev->minor_version = sb->minor_version;
1508         if (mddev->in_sync)
1509         {
1510                 sb->recovery_cp = mddev->recovery_cp;
1511                 sb->cp_events_hi = (mddev->events>>32);
1512                 sb->cp_events_lo = (u32)mddev->events;
1513                 if (mddev->recovery_cp == MaxSector)
1514                         sb->state = (1<< MD_SB_CLEAN);
1515         } else
1516                 sb->recovery_cp = 0;
1517
1518         sb->layout = mddev->layout;
1519         sb->chunk_size = mddev->chunk_sectors << 9;
1520
1521         if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1522                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1523
1524         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1525         rdev_for_each(rdev2, mddev) {
1526                 mdp_disk_t *d;
1527                 int desc_nr;
1528                 int is_active = test_bit(In_sync, &rdev2->flags);
1529
1530                 if (rdev2->raid_disk >= 0 &&
1531                     sb->minor_version >= 91)
1532                         /* we have nowhere to store the recovery_offset,
1533                          * but if it is not below the reshape_position,
1534                          * we can piggy-back on that.
1535                          */
1536                         is_active = 1;
1537                 if (rdev2->raid_disk < 0 ||
1538                     test_bit(Faulty, &rdev2->flags))
1539                         is_active = 0;
1540                 if (is_active)
1541                         desc_nr = rdev2->raid_disk;
1542                 else
1543                         desc_nr = next_spare++;
1544                 rdev2->desc_nr = desc_nr;
1545                 d = &sb->disks[rdev2->desc_nr];
1546                 nr_disks++;
1547                 d->number = rdev2->desc_nr;
1548                 d->major = MAJOR(rdev2->bdev->bd_dev);
1549                 d->minor = MINOR(rdev2->bdev->bd_dev);
1550                 if (is_active)
1551                         d->raid_disk = rdev2->raid_disk;
1552                 else
1553                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1554                 if (test_bit(Faulty, &rdev2->flags))
1555                         d->state = (1<<MD_DISK_FAULTY);
1556                 else if (is_active) {
1557                         d->state = (1<<MD_DISK_ACTIVE);
1558                         if (test_bit(In_sync, &rdev2->flags))
1559                                 d->state |= (1<<MD_DISK_SYNC);
1560                         active++;
1561                         working++;
1562                 } else {
1563                         d->state = 0;
1564                         spare++;
1565                         working++;
1566                 }
1567                 if (test_bit(WriteMostly, &rdev2->flags))
1568                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1569                 if (test_bit(FailFast, &rdev2->flags))
1570                         d->state |= (1<<MD_DISK_FAILFAST);
1571         }
1572         /* now set the "removed" and "faulty" bits on any missing devices */
1573         for (i=0 ; i < mddev->raid_disks ; i++) {
1574                 mdp_disk_t *d = &sb->disks[i];
1575                 if (d->state == 0 && d->number == 0) {
1576                         d->number = i;
1577                         d->raid_disk = i;
1578                         d->state = (1<<MD_DISK_REMOVED);
1579                         d->state |= (1<<MD_DISK_FAULTY);
1580                         failed++;
1581                 }
1582         }
1583         sb->nr_disks = nr_disks;
1584         sb->active_disks = active;
1585         sb->working_disks = working;
1586         sb->failed_disks = failed;
1587         sb->spare_disks = spare;
1588
1589         sb->this_disk = sb->disks[rdev->desc_nr];
1590         sb->sb_csum = calc_sb_csum(sb);
1591 }
1592
1593 /*
1594  * rdev_size_change for 0.90.0
1595  */
1596 static unsigned long long
1597 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1598 {
1599         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1600                 return 0; /* component must fit device */
1601         if (rdev->mddev->bitmap_info.offset)
1602                 return 0; /* can't move bitmap */
1603         rdev->sb_start = calc_dev_sboffset(rdev);
1604         if (!num_sectors || num_sectors > rdev->sb_start)
1605                 num_sectors = rdev->sb_start;
1606         /* Limit to 4TB as metadata cannot record more than that.
1607          * 4TB == 2^32 KB, or 2*2^32 sectors.
1608          */
1609         if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1610                 num_sectors = (sector_t)(2ULL << 32) - 2;
1611         do {
1612                 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1613                        rdev->sb_page);
1614         } while (md_super_wait(rdev->mddev) < 0);
1615         return num_sectors;
1616 }
1617
1618 static int
1619 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1620 {
1621         /* non-zero offset changes not possible with v0.90 */
1622         return new_offset == 0;
1623 }
1624
1625 /*
1626  * version 1 superblock
1627  */
1628
1629 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1630 {
1631         __le32 disk_csum;
1632         u32 csum;
1633         unsigned long long newcsum;
1634         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1635         __le32 *isuper = (__le32*)sb;
1636
1637         disk_csum = sb->sb_csum;
1638         sb->sb_csum = 0;
1639         newcsum = 0;
1640         for (; size >= 4; size -= 4)
1641                 newcsum += le32_to_cpu(*isuper++);
1642
1643         if (size == 2)
1644                 newcsum += le16_to_cpu(*(__le16*) isuper);
1645
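             /* Fold the 64-bit running sum into 32 bits, adding the carries
              * (high word) back into the low word.
              */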
1646         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1647         sb->sb_csum = disk_csum;
1648         return cpu_to_le32(csum);
1649 }
1650
1651 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1652 {
1653         struct mdp_superblock_1 *sb;
1654         int ret;
1655         sector_t sb_start;
1656         sector_t sectors;
1657         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1658         int bmask;
1659         bool spare_disk = true;
1660
1661         /*
1662          * Calculate the position of the superblock in 512byte sectors.
1663          * It is always aligned to a 4K boundary and
1664          * depending on minor_version, it can be:
1665          * 0: At least 8K, but less than 12K, from end of device
1666          * 1: At start of device
1667          * 2: 4K from start of device.
1668          */
1669         switch(minor_version) {
1670         case 0:
1671                 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1672                 sb_start -= 8*2;
1673                 sb_start &= ~(sector_t)(4*2-1);
1674                 break;
1675         case 1:
1676                 sb_start = 0;
1677                 break;
1678         case 2:
1679                 sb_start = 8;
1680                 break;
1681         default:
1682                 return -EINVAL;
1683         }
1684         rdev->sb_start = sb_start;
1685
1686         /* superblock is rarely larger than 1K, but it can be larger,
1687          * and it is safe to read 4k, so we do that
1688          */
1689         ret = read_disk_sb(rdev, 4096);
1690         if (ret) return ret;
1691
1692         sb = page_address(rdev->sb_page);
1693
1694         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1695             sb->major_version != cpu_to_le32(1) ||
1696             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1697             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1698             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1699                 return -EINVAL;
1700
1701         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1702                 pr_warn("md: invalid superblock checksum on %s\n",
1703                         bdevname(rdev->bdev,b));
1704                 return -EINVAL;
1705         }
1706         if (le64_to_cpu(sb->data_size) < 10) {
1707                 pr_warn("md: data_size too small on %s\n",
1708                         bdevname(rdev->bdev,b));
1709                 return -EINVAL;
1710         }
1711         if (sb->pad0 ||
1712             sb->pad3[0] ||
1713             memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1714                 /* Some padding is non-zero, might be a new feature */
1715                 return -EINVAL;
1716
1717         rdev->preferred_minor = 0xffff;
1718         rdev->data_offset = le64_to_cpu(sb->data_offset);
1719         rdev->new_data_offset = rdev->data_offset;
1720         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1721             (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1722                 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1723         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1724
1725         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1726         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1727         if (rdev->sb_size & bmask)
1728                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1729
1730         if (minor_version
1731             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1732                 return -EINVAL;
1733         if (minor_version
1734             && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1735                 return -EINVAL;
1736
1737         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1738                 rdev->desc_nr = -1;
1739         else
1740                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1741
1742         if (!rdev->bb_page) {
1743                 rdev->bb_page = alloc_page(GFP_KERNEL);
1744                 if (!rdev->bb_page)
1745                         return -ENOMEM;
1746         }
1747         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1748             rdev->badblocks.count == 0) {
1749                 /* need to load the bad block list.
1750                  * Currently we limit it to one page.
1751                  */
1752                 s32 offset;
1753                 sector_t bb_sector;
1754                 __le64 *bbp;
1755                 int i;
1756                 int sectors = le16_to_cpu(sb->bblog_size);
1757                 if (sectors > (PAGE_SIZE / 512))
1758                         return -EINVAL;
1759                 offset = le32_to_cpu(sb->bblog_offset);
1760                 if (offset == 0)
1761                         return -EINVAL;
1762                 bb_sector = (long long)offset;
1763                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1764                                   rdev->bb_page, REQ_OP_READ, 0, true))
1765                         return -EIO;
1766                 bbp = (__le64 *)page_address(rdev->bb_page);
1767                 rdev->badblocks.shift = sb->bblog_shift;
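                     /* Each on-disk entry packs a 54-bit start sector and a
                      * 10-bit length as (sector << 10 | count), both scaled by
                      * bblog_shift; an all-ones entry terminates the list.
                      */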
1768                 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1769                         u64 bb = le64_to_cpu(*bbp);
1770                         int count = bb & (0x3ff);
1771                         u64 sector = bb >> 10;
1772                         sector <<= sb->bblog_shift;
1773                         count <<= sb->bblog_shift;
1774                         if (bb + 1 == 0)
1775                                 break;
1776                         if (badblocks_set(&rdev->badblocks, sector, count, 1))
1777                                 return -EINVAL;
1778                 }
1779         } else if (sb->bblog_offset != 0)
1780                 rdev->badblocks.shift = 0;
1781
1782         if ((le32_to_cpu(sb->feature_map) &
1783             (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
1784                 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1785                 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1786                 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1787         }
1788
1789         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1790             sb->level != 0)
1791                 return -EINVAL;
1792
1793         /* not spare disk, or LEVEL_MULTIPATH */
1794         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
1795                 (rdev->desc_nr >= 0 &&
1796                 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1797                 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1798                  le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1799                 spare_disk = false;
1800
1801         if (!refdev) {
1802                 if (!spare_disk)
1803                         ret = 1;
1804                 else
1805                         ret = 0;
1806         } else {
1807                 __u64 ev1, ev2;
1808                 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1809
1810                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1811                     sb->level != refsb->level ||
1812                     sb->layout != refsb->layout ||
1813                     sb->chunksize != refsb->chunksize) {
1814                         pr_warn("md: %s has strangely different superblock to %s\n",
1815                                 bdevname(rdev->bdev,b),
1816                                 bdevname(refdev->bdev,b2));
1817                         return -EINVAL;
1818                 }
1819                 ev1 = le64_to_cpu(sb->events);
1820                 ev2 = le64_to_cpu(refsb->events);
1821
1822                 if (!spare_disk && ev1 > ev2)
1823                         ret = 1;
1824                 else
1825                         ret = 0;
1826         }
1827         if (minor_version) {
1828                 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1829                 sectors -= rdev->data_offset;
1830         } else
1831                 sectors = rdev->sb_start;
1832         if (sectors < le64_to_cpu(sb->data_size))
1833                 return -EINVAL;
1834         rdev->sectors = le64_to_cpu(sb->data_size);
1835         return ret;
1836 }
1837
1838 static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
1839 {
1840         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1841         __u64 ev1 = le64_to_cpu(sb->events);
1842
1843         rdev->raid_disk = -1;
1844         clear_bit(Faulty, &rdev->flags);
1845         clear_bit(In_sync, &rdev->flags);
1846         clear_bit(Bitmap_sync, &rdev->flags);
1847         clear_bit(WriteMostly, &rdev->flags);
1848
1849         if (mddev->raid_disks == 0) {
1850                 mddev->major_version = 1;
1851                 mddev->patch_version = 0;
1852                 mddev->external = 0;
1853                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1854                 mddev->ctime = le64_to_cpu(sb->ctime);
1855                 mddev->utime = le64_to_cpu(sb->utime);
1856                 mddev->level = le32_to_cpu(sb->level);
1857                 mddev->clevel[0] = 0;
1858                 mddev->layout = le32_to_cpu(sb->layout);
1859                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1860                 mddev->dev_sectors = le64_to_cpu(sb->size);
1861                 mddev->events = ev1;
1862                 mddev->bitmap_info.offset = 0;
1863                 mddev->bitmap_info.space = 0;
1864                 /* Default location for bitmap is 1K after superblock
1865                  * using 3K - total of 4K
1866                  */
1867                 mddev->bitmap_info.default_offset = 1024 >> 9;
1868                 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1869                 mddev->reshape_backwards = 0;
1870
1871                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1872                 memcpy(mddev->uuid, sb->set_uuid, 16);
1873
1874                 mddev->max_disks =  (4096-256)/2;
1875
1876                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1877                     mddev->bitmap_info.file == NULL) {
1878                         mddev->bitmap_info.offset =
1879                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1880                         /* Metadata doesn't record how much space is available.
1881                          * For 1.0, we assume we can use up to the superblock
1882                          * if before, else to 4K beyond superblock.
1883                          * For others, assume no change is possible.
1884                          */
1885                         if (mddev->minor_version > 0)
1886                                 mddev->bitmap_info.space = 0;
1887                         else if (mddev->bitmap_info.offset > 0)
1888                                 mddev->bitmap_info.space =
1889                                         8 - mddev->bitmap_info.offset;
1890                         else
1891                                 mddev->bitmap_info.space =
1892                                         -mddev->bitmap_info.offset;
1893                 }
1894
1895                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1896                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1897                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1898                         mddev->new_level = le32_to_cpu(sb->new_level);
1899                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1900                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1901                         if (mddev->delta_disks < 0 ||
1902                             (mddev->delta_disks == 0 &&
1903                              (le32_to_cpu(sb->feature_map)
1904                               & MD_FEATURE_RESHAPE_BACKWARDS)))
1905                                 mddev->reshape_backwards = 1;
1906                 } else {
1907                         mddev->reshape_position = MaxSector;
1908                         mddev->delta_disks = 0;
1909                         mddev->new_level = mddev->level;
1910                         mddev->new_layout = mddev->layout;
1911                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1912                 }
1913
1914                 if (mddev->level == 0 &&
1915                     !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
1916                         mddev->layout = -1;
1917
1918                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1919                         set_bit(MD_HAS_JOURNAL, &mddev->flags);
1920
1921                 if (le32_to_cpu(sb->feature_map) &
1922                     (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
1923                         if (le32_to_cpu(sb->feature_map) &
1924                             (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1925                                 return -EINVAL;
1926                         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
1927                             (le32_to_cpu(sb->feature_map) &
1928                                             MD_FEATURE_MULTIPLE_PPLS))
1929                                 return -EINVAL;
1930                         set_bit(MD_HAS_PPL, &mddev->flags);
1931                 }
1932         } else if (mddev->pers == NULL) {
1933                 /* Insist on good event counter while assembling, except for
1934                  * spares (which don't need an event count).
1935                  * Similar to mdadm, we allow event counter difference of 1
1936                  * from the freshest device.
1937                  */
1938                 if (rdev->desc_nr >= 0 &&
1939                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1940                     (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1941                      le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1942                         if (ev1 + 1 < mddev->events)
1943                                 return -EINVAL;
1944         } else if (mddev->bitmap) {
1945                 /* If adding to array with a bitmap, then we can accept an
1946                  * older device, but not too old.
1947                  */
1948                 if (ev1 < mddev->bitmap->events_cleared)
1949                         return 0;
1950                 if (ev1 < mddev->events)
1951                         set_bit(Bitmap_sync, &rdev->flags);
1952         } else {
1953                 if (ev1 < mddev->events)
1954                         /* just a hot-add of a new device, leave raid_disk at -1 */
1955                         return 0;
1956         }
1957         if (mddev->level != LEVEL_MULTIPATH) {
1958                 int role;
1959                 if (rdev->desc_nr < 0 ||
1960                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1961                         role = MD_DISK_ROLE_SPARE;
1962                         rdev->desc_nr = -1;
1963                 } else if (mddev->pers == NULL && freshest && ev1 < mddev->events) {
1964                         /*
1965                          * If we are assembling, and our event counter is smaller than the
1966                          * highest event counter, we cannot trust our superblock about the role.
1967                          * It could happen that our rdev was marked as Faulty, and all other
1968                          * superblocks were updated with +1 event counter.
1969                          * Then, before the next superblock update, which typically happens when
1970                          * remove_and_add_spares() removes the device from the array, there was
1971                          * a crash or reboot.
1972                          * If we allow current rdev without consulting the freshest superblock,
1973                          * we could cause data corruption.
1974                          * Note that in this case our event counter is smaller by 1 than the
1975                          * highest, otherwise, this rdev would not be allowed into array;
1976                          * both kernel and mdadm allow event counter difference of 1.
1977                          */
1978                         struct mdp_superblock_1 *freshest_sb = page_address(freshest->sb_page);
1979                         u32 freshest_max_dev = le32_to_cpu(freshest_sb->max_dev);
1980
1981                         if (rdev->desc_nr >= freshest_max_dev) {
1982                                 /* this is unexpected, better not proceed */
1983                                 pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n",
1984                                                 mdname(mddev), rdev->bdev, rdev->desc_nr,
1985                                                 freshest->bdev, freshest_max_dev);
1986                                 return -EUCLEAN;
1987                         }
1988
1989                         role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]);
1990                         pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n",
1991                                      mdname(mddev), rdev->bdev, role, role, freshest->bdev);
1992                 } else {
1993                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1994                 }
1995                 switch(role) {
1996                 case MD_DISK_ROLE_SPARE: /* spare */
1997                         break;
1998                 case MD_DISK_ROLE_FAULTY: /* faulty */
1999                         set_bit(Faulty, &rdev->flags);
2000                         break;
2001                 case MD_DISK_ROLE_JOURNAL: /* journal device */
2002                         if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
2003                                 /* journal device without journal feature */
2004                                 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
2005                                 return -EINVAL;
2006                         }
2007                         set_bit(Journal, &rdev->flags);
2008                         rdev->journal_tail = le64_to_cpu(sb->journal_tail);
2009                         rdev->raid_disk = 0;
2010                         break;
2011                 default:
2012                         rdev->saved_raid_disk = role;
2013                         if ((le32_to_cpu(sb->feature_map) &
2014                              MD_FEATURE_RECOVERY_OFFSET)) {
2015                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
2016                                 if (!(le32_to_cpu(sb->feature_map) &
2017                                       MD_FEATURE_RECOVERY_BITMAP))
2018                                         rdev->saved_raid_disk = -1;
2019                         } else {
2020                                 /*
2021                                  * If the array is FROZEN, then the device can't
2022                                  * be in_sync with rest of array.
2023                                  */
2024                                 if (!test_bit(MD_RECOVERY_FROZEN,
2025                                               &mddev->recovery))
2026                                         set_bit(In_sync, &rdev->flags);
2027                         }
2028                         rdev->raid_disk = role;
2029                         break;
2030                 }
2031                 if (sb->devflags & WriteMostly1)
2032                         set_bit(WriteMostly, &rdev->flags);
2033                 if (sb->devflags & FailFast1)
2034                         set_bit(FailFast, &rdev->flags);
2035                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
2036                         set_bit(Replacement, &rdev->flags);
2037         } else /* MULTIPATH devices are always in_sync */
2038                 set_bit(In_sync, &rdev->flags);
2039
2040         return 0;
2041 }
2042
2043 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
2044 {
2045         struct mdp_superblock_1 *sb;
2046         struct md_rdev *rdev2;
2047         int max_dev, i;
2048         /* make rdev->sb match mddev and rdev data. */
2049
2050         sb = page_address(rdev->sb_page);
2051
2052         sb->feature_map = 0;
2053         sb->pad0 = 0;
2054         sb->recovery_offset = cpu_to_le64(0);
2055         memset(sb->pad3, 0, sizeof(sb->pad3));
2056
2057         sb->utime = cpu_to_le64((__u64)mddev->utime);
2058         sb->events = cpu_to_le64(mddev->events);
2059         if (mddev->in_sync)
2060                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
2061         else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
2062                 sb->resync_offset = cpu_to_le64(MaxSector);
2063         else
2064                 sb->resync_offset = cpu_to_le64(0);
2065
2066         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
2067
2068         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
2069         sb->size = cpu_to_le64(mddev->dev_sectors);
2070         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
2071         sb->level = cpu_to_le32(mddev->level);
2072         sb->layout = cpu_to_le32(mddev->layout);
2073         if (test_bit(FailFast, &rdev->flags))
2074                 sb->devflags |= FailFast1;
2075         else
2076                 sb->devflags &= ~FailFast1;
2077
2078         if (test_bit(WriteMostly, &rdev->flags))
2079                 sb->devflags |= WriteMostly1;
2080         else
2081                 sb->devflags &= ~WriteMostly1;
2082         sb->data_offset = cpu_to_le64(rdev->data_offset);
2083         sb->data_size = cpu_to_le64(rdev->sectors);
2084
2085         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
2086                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
2087                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
2088         }
2089
2090         if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
2091             !test_bit(In_sync, &rdev->flags)) {
2092                 sb->feature_map |=
2093                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
2094                 sb->recovery_offset =
2095                         cpu_to_le64(rdev->recovery_offset);
2096                 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2097                         sb->feature_map |=
2098                                 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
2099         }
2100         /* Note: recovery_offset and journal_tail share space  */
2101         if (test_bit(Journal, &rdev->flags))
2102                 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
2103         if (test_bit(Replacement, &rdev->flags))
2104                 sb->feature_map |=
2105                         cpu_to_le32(MD_FEATURE_REPLACEMENT);
2106
2107         if (mddev->reshape_position != MaxSector) {
2108                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2109                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2110                 sb->new_layout = cpu_to_le32(mddev->new_layout);
2111                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2112                 sb->new_level = cpu_to_le32(mddev->new_level);
2113                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
2114                 if (mddev->delta_disks == 0 &&
2115                     mddev->reshape_backwards)
2116                         sb->feature_map
2117                                 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
2118                 if (rdev->new_data_offset != rdev->data_offset) {
2119                         sb->feature_map
2120                                 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2121                         sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2122                                                              - rdev->data_offset));
2123                 }
2124         }
2125
2126         if (mddev_is_clustered(mddev))
2127                 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2128
2129         if (rdev->badblocks.count == 0)
2130                 /* Nothing to do for bad blocks */ ;
2131         else if (sb->bblog_offset == 0)
2132                 /* Cannot record bad blocks on this device */
2133                 md_error(mddev, rdev);
2134         else {
2135                 struct badblocks *bb = &rdev->badblocks;
2136                 __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
2137                 u64 *p = bb->page;
2138                 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
2139                 if (bb->changed) {
2140                         unsigned seq;
2141
2142 retry:
2143                         seq = read_seqbegin(&bb->lock);
2144
2145                         memset(bbp, 0xff, PAGE_SIZE);
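                             /* The 0xff fill makes unused slots read back as the
                              * all-ones terminator that super_1_load stops on.
                              */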
2146
2147                         for (i = 0 ; i < bb->count ; i++) {
2148                                 u64 internal_bb = p[i];
2149                                 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2150                                                 | BB_LEN(internal_bb));
2151                                 bbp[i] = cpu_to_le64(store_bb);
2152                         }
2153                         bb->changed = 0;
2154                         if (read_seqretry(&bb->lock, seq))
2155                                 goto retry;
2156
2157                         bb->sector = (rdev->sb_start +
2158                                       (int)le32_to_cpu(sb->bblog_offset));
2159                         bb->size = le16_to_cpu(sb->bblog_size);
2160                 }
2161         }
2162
2163         max_dev = 0;
2164         rdev_for_each(rdev2, mddev)
2165                 if (rdev2->desc_nr+1 > max_dev)
2166                         max_dev = rdev2->desc_nr+1;
2167
2168         if (max_dev > le32_to_cpu(sb->max_dev)) {
2169                 int bmask;
2170                 sb->max_dev = cpu_to_le32(max_dev);
2171                 rdev->sb_size = max_dev * 2 + 256;
2172                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2173                 if (rdev->sb_size & bmask)
2174                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
2175         } else
2176                 max_dev = le32_to_cpu(sb->max_dev);
2177
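             /* every slot defaults to 'spare'; the rdev loop further down
              * overwrites entries for devices actually present in the array
              */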
2178         for (i=0; i<max_dev;i++)
2179                 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2180
2181         if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2182                 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
2183
2184         if (test_bit(MD_HAS_PPL, &mddev->flags)) {
2185                 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2186                         sb->feature_map |=
2187                             cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2188                 else
2189                         sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
2190                 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2191                 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2192         }
2193
2194         rdev_for_each(rdev2, mddev) {
2195                 i = rdev2->desc_nr;
2196                 if (test_bit(Faulty, &rdev2->flags))
2197                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
2198                 else if (test_bit(In_sync, &rdev2->flags))
2199                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2200                 else if (test_bit(Journal, &rdev2->flags))
2201                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
2202                 else if (rdev2->raid_disk >= 0)
2203                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2204                 else
2205                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2206         }
2207
2208         sb->sb_csum = calc_sb_1_csum(sb);
2209 }
2210
2211 static sector_t super_1_choose_bm_space(sector_t dev_size)
2212 {
2213         sector_t bm_space;
2214
2215         /* if the device is bigger than 8Gig, save 64k for bitmap
2216          * usage, if bigger than 200Gig, save 128k
2217          */
2218         if (dev_size < 64*2)
2219                 bm_space = 0;
2220         else if (dev_size - 64*2 >= 200*1024*1024*2)
2221                 bm_space = 128*2;
2222         else if (dev_size - 4*2 > 8*1024*1024*2)
2223                 bm_space = 64*2;
2224         else
2225                 bm_space = 4*2;
2226         return bm_space;
2227 }
2228
2229 static unsigned long long
2230 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
2231 {
2232         struct mdp_superblock_1 *sb;
2233         sector_t max_sectors;
2234         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
2235                 return 0; /* component must fit device */
2236         if (rdev->data_offset != rdev->new_data_offset)
2237                 return 0; /* too confusing */
2238         if (rdev->sb_start < rdev->data_offset) {
2239                 /* minor versions 1 and 2; superblock before data */
2240                 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
2241                 max_sectors -= rdev->data_offset;
2242                 if (!num_sectors || num_sectors > max_sectors)
2243                         num_sectors = max_sectors;
2244         } else if (rdev->mddev->bitmap_info.offset) {
2245                 /* minor version 0 with bitmap we can't move */
2246                 return 0;
2247         } else {
2248                 /* minor version 0; superblock after data */
2249                 sector_t sb_start, bm_space;
2250                 sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
2251
2252                 /* 8K is for superblock */
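                     /* i.e. the superblock starts 8K before the end of the
                      * device, rounded down to a 4K boundary; the same position
                      * super_1_load expects for a minor-version-0 superblock
                      */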
2253                 sb_start = dev_size - 8*2;
2254                 sb_start &= ~(sector_t)(4*2 - 1);
2255
2256                 bm_space = super_1_choose_bm_space(dev_size);
2257
2258                 /* Space that can be used to store data needs to exclude the
2259                  * superblock, bitmap space and bad block space (4K)
2260                  */
2261                 max_sectors = sb_start - bm_space - 4*2;
2262
2263                 if (!num_sectors || num_sectors > max_sectors)
2264                         num_sectors = max_sectors;
2265                 rdev->sb_start = sb_start;
2266         }
2267         sb = page_address(rdev->sb_page);
2268         sb->data_size = cpu_to_le64(num_sectors);
2269         sb->super_offset = cpu_to_le64(rdev->sb_start);
2270         sb->sb_csum = calc_sb_1_csum(sb);
2271         do {
2272                 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2273                                rdev->sb_page);
2274         } while (md_super_wait(rdev->mddev) < 0);
2275         return num_sectors;
2276
2277 }
2278
2279 static int
2280 super_1_allow_new_offset(struct md_rdev *rdev,
2281                          unsigned long long new_offset)
2282 {
2283         /* All necessary checks on new >= old have been done */
2284         struct bitmap *bitmap;
2285         if (new_offset >= rdev->data_offset)
2286                 return 1;
2287
2288         /* with 1.0 metadata, there is no metadata to tread on
2289          * so we can always move back */
2290         if (rdev->mddev->minor_version == 0)
2291                 return 1;
2292
2293         /* otherwise we must be sure not to step on
2294          * any metadata, so stay:
2295          * 36K beyond start of superblock
2296          * beyond end of badblocks
2297          * beyond write-intent bitmap
2298          */
2299         if (rdev->sb_start + (32+4)*2 > new_offset)
2300                 return 0;
2301         bitmap = rdev->mddev->bitmap;
2302         if (bitmap && !rdev->mddev->bitmap_info.file &&
2303             rdev->sb_start + rdev->mddev->bitmap_info.offset +
2304             bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
2305                 return 0;
2306         if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2307                 return 0;
2308
2309         return 1;
2310 }
2311
2312 static struct super_type super_types[] = {
2313         [0] = {
2314                 .name   = "0.90.0",
2315                 .owner  = THIS_MODULE,
2316                 .load_super         = super_90_load,
2317                 .validate_super     = super_90_validate,
2318                 .sync_super         = super_90_sync,
2319                 .rdev_size_change   = super_90_rdev_size_change,
2320                 .allow_new_offset   = super_90_allow_new_offset,
2321         },
2322         [1] = {
2323                 .name   = "md-1",
2324                 .owner  = THIS_MODULE,
2325                 .load_super         = super_1_load,
2326                 .validate_super     = super_1_validate,
2327                 .sync_super         = super_1_sync,
2328                 .rdev_size_change   = super_1_rdev_size_change,
2329                 .allow_new_offset   = super_1_allow_new_offset,
2330         },
2331 };
2332
2333 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
2334 {
2335         if (mddev->sync_super) {
2336                 mddev->sync_super(mddev, rdev);
2337                 return;
2338         }
2339
2340         BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2341
2342         super_types[mddev->major_version].sync_super(mddev, rdev);
2343 }
2344
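     /*
      * Return 1 if the two arrays share any underlying gendisk, ignoring
      * faulty, journal and unassigned devices on either side; otherwise 0.
      */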
2345 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
2346 {
2347         struct md_rdev *rdev, *rdev2;
2348
2349         rcu_read_lock();
2350         rdev_for_each_rcu(rdev, mddev1) {
2351                 if (test_bit(Faulty, &rdev->flags) ||
2352                     test_bit(Journal, &rdev->flags) ||
2353                     rdev->raid_disk == -1)
2354                         continue;
2355                 rdev_for_each_rcu(rdev2, mddev2) {
2356                         if (test_bit(Faulty, &rdev2->flags) ||
2357                             test_bit(Journal, &rdev2->flags) ||
2358                             rdev2->raid_disk == -1)
2359                                 continue;
2360                         if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
2361                                 rcu_read_unlock();
2362                                 return 1;
2363                         }
2364                 }
2365         }
2366         rcu_read_unlock();
2367         return 0;
2368 }
2369
2370 static LIST_HEAD(pending_raid_disks);
2371
2372 /*
2373  * Try to register a data integrity profile for an mddev
2374  *
2375  * This is called when an array is started and after a disk has been kicked
2376  * from the array. It only succeeds if all working and active component devices
2377  * are integrity capable with matching profiles.
2378  */
2379 int md_integrity_register(struct mddev *mddev)
2380 {
2381         struct md_rdev *rdev, *reference = NULL;
2382
2383         if (list_empty(&mddev->disks))
2384                 return 0; /* nothing to do */
2385         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2386                 return 0; /* shouldn't register, or already is */
2387         rdev_for_each(rdev, mddev) {
2388                 /* skip spares and non-functional disks */
2389                 if (test_bit(Faulty, &rdev->flags))
2390                         continue;
2391                 if (rdev->raid_disk < 0)
2392                         continue;
2393                 if (!reference) {
2394                         /* Use the first rdev as the reference */
2395                         reference = rdev;
2396                         continue;
2397                 }
2398                 /* does this rdev's profile match the reference profile? */
2399                 if (blk_integrity_compare(reference->bdev->bd_disk,
2400                                 rdev->bdev->bd_disk) < 0)
2401                         return -EINVAL;
2402         }
2403         if (!reference || !bdev_get_integrity(reference->bdev))
2404                 return 0;
2405         /*
2406          * All component devices are integrity capable and have matching
2407          * profiles, register the common profile for the md device.
2408          */
2409         blk_integrity_register(mddev->gendisk,
2410                                bdev_get_integrity(reference->bdev));
2411
2412         pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2413         if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) {
2414                 pr_err("md: failed to create integrity pool for %s\n",
2415                        mdname(mddev));
2416                 return -EINVAL;
2417         }
2418         return 0;
2419 }
2420 EXPORT_SYMBOL(md_integrity_register);
2421
2422 /*
2423  * Attempt to add an rdev, but only if it is consistent with the current
2424  * integrity profile
2425  */
2426 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2427 {
2428         struct blk_integrity *bi_mddev;
2429         char name[BDEVNAME_SIZE];
2430
2431         if (!mddev->gendisk)
2432                 return 0;
2433
2434         bi_mddev = blk_get_integrity(mddev->gendisk);
2435
2436         if (!bi_mddev) /* nothing to do */
2437                 return 0;
2438
2439         if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2440                 pr_err("%s: incompatible integrity profile for %s\n",
2441                        mdname(mddev), bdevname(rdev->bdev, name));
2442                 return -ENXIO;
2443         }
2444
2445         return 0;
2446 }
2447 EXPORT_SYMBOL(md_integrity_add_rdev);
2448
2449 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2450 {
2451         char b[BDEVNAME_SIZE];
2452         struct kobject *ko;
2453         int err;
2454
2455         /* prevent duplicates */
2456         if (find_rdev(mddev, rdev->bdev->bd_dev))
2457                 return -EEXIST;
2458
2459         if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
2460             mddev->pers)
2461                 return -EROFS;
2462
2463         /* make sure rdev->sectors exceeds mddev->dev_sectors */
2464         if (!test_bit(Journal, &rdev->flags) &&
2465             rdev->sectors &&
2466             (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2467                 if (mddev->pers) {
2468                         /* Cannot change size, so fail
2469                          * If mddev->level <= 0, then we don't care
2470                          * about aligning sizes (e.g. linear)
2471                          */
2472                         if (mddev->level > 0)
2473                                 return -ENOSPC;
2474                 } else
2475                         mddev->dev_sectors = rdev->sectors;
2476         }
2477
2478         /* Verify rdev->desc_nr is unique.
2479          * If it is -1, assign a free number, else
2480          * check that the number is not in use
2481          */
2482         rcu_read_lock();
2483         if (rdev->desc_nr < 0) {
2484                 int choice = 0;
2485                 if (mddev->pers)
2486                         choice = mddev->raid_disks;
2487                 while (md_find_rdev_nr_rcu(mddev, choice))
2488                         choice++;
2489                 rdev->desc_nr = choice;
2490         } else {
2491                 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2492                         rcu_read_unlock();
2493                         return -EBUSY;
2494                 }
2495         }
2496         rcu_read_unlock();
2497         if (!test_bit(Journal, &rdev->flags) &&
2498             mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2499                 pr_warn("md: %s: array is limited to %d devices\n",
2500                         mdname(mddev), mddev->max_disks);
2501                 return -EBUSY;
2502         }
2503         bdevname(rdev->bdev,b);
2504         strreplace(b, '/', '!');
2505
2506         rdev->mddev = mddev;
2507         pr_debug("md: bind<%s>\n", b);
2508
2509         if (mddev->raid_disks)
2510                 mddev_create_serial_pool(mddev, rdev, false);
2511
2512         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2513                 goto fail;
2514
2515         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2516         /* failure here is OK */
2517         err = sysfs_create_link(&rdev->kobj, ko, "block");
2518         rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2519         rdev->sysfs_unack_badblocks =
2520                 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2521         rdev->sysfs_badblocks =
2522                 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
2523
2524         list_add_rcu(&rdev->same_set, &mddev->disks);
2525         bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2526
2527         /* May as well allow recovery to be retried once */
2528         mddev->recovery_disabled++;
2529
2530         return 0;
2531
2532  fail:
2533         pr_warn("md: failed to register dev-%s for %s\n",
2534                 b, mdname(mddev));
2535         mddev_destroy_serial_pool(mddev, rdev, false);
2536         return err;
2537 }
2538
2539 static void rdev_delayed_delete(struct work_struct *ws)
2540 {
2541         struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2542         kobject_del(&rdev->kobj);
2543         kobject_put(&rdev->kobj);
2544 }
2545
2546 static void unbind_rdev_from_array(struct md_rdev *rdev)
2547 {
2548         char b[BDEVNAME_SIZE];
2549
2550         bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2551         list_del_rcu(&rdev->same_set);
2552         pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
2553         mddev_destroy_serial_pool(rdev->mddev, rdev, false);
2554         rdev->mddev = NULL;
2555         sysfs_remove_link(&rdev->kobj, "block");
2556         sysfs_put(rdev->sysfs_state);
2557         sysfs_put(rdev->sysfs_unack_badblocks);
2558         sysfs_put(rdev->sysfs_badblocks);
2559         rdev->sysfs_state = NULL;
2560         rdev->sysfs_unack_badblocks = NULL;
2561         rdev->sysfs_badblocks = NULL;
2562         rdev->badblocks.count = 0;
2563         /* We need to delay this, otherwise we can deadlock when
2564          * writing 'remove' to "dev/state".  We also need
2565          * to delay it due to rcu usage.
2566          */
2567         synchronize_rcu();
2568         INIT_WORK(&rdev->del_work, rdev_delayed_delete);
2569         kobject_get(&rdev->kobj);
2570         queue_work(md_rdev_misc_wq, &rdev->del_work);
2571 }
2572
2573 /*
2574  * prevent the device from being mounted, repartitioned or
2575  * otherwise reused by a RAID array (or any other kernel
2576  * subsystem), by bd_claiming the device.
2577  */
2578 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2579 {
2580         int err = 0;
2581         struct block_device *bdev;
2582
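             /* With 'shared', use the address of lock_rdev as a fixed holder
              * cookie so that md's exclusive claims on the same device can
              * coexist; otherwise this rdev itself is the holder.
              */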
2583         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2584                                  shared ? (struct md_rdev *)lock_rdev : rdev);
2585         if (IS_ERR(bdev)) {
2586                 pr_warn("md: could not open device unknown-block(%u,%u).\n",
2587                         MAJOR(dev), MINOR(dev));
2588                 return PTR_ERR(bdev);
2589         }
2590         rdev->bdev = bdev;
2591         return err;
2592 }
2593
2594 static void unlock_rdev(struct md_rdev *rdev)
2595 {
2596         struct block_device *bdev = rdev->bdev;
2597         rdev->bdev = NULL;
2598         blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2599 }
2600
2601 void md_autodetect_dev(dev_t dev);
2602
2603 static void export_rdev(struct md_rdev *rdev)
2604 {
2605         char b[BDEVNAME_SIZE];
2606
2607         pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
2608         md_rdev_clear(rdev);
2609 #ifndef MODULE
2610         if (test_bit(AutoDetected, &rdev->flags))
2611                 md_autodetect_dev(rdev->bdev->bd_dev);
2612 #endif
2613         unlock_rdev(rdev);
2614         kobject_put(&rdev->kobj);
2615 }
2616
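     /* Fully detach an rdev from its array: drop the list and sysfs linkage,
      * then release our exclusive claim on the underlying block device.
      */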
2617 void md_kick_rdev_from_array(struct md_rdev *rdev)
2618 {
2619         unbind_rdev_from_array(rdev);
2620         export_rdev(rdev);
2621 }
2622 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
2623
2624 static void export_array(struct mddev *mddev)
2625 {
2626         struct md_rdev *rdev;
2627
2628         while (!list_empty(&mddev->disks)) {
2629                 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2630                                         same_set);
2631                 md_kick_rdev_from_array(rdev);
2632         }
2633         mddev->raid_disks = 0;
2634         mddev->major_version = 0;
2635 }
2636
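     /*
      * Try to mark the array clean.  writes_pending is a percpu refcount, so
      * it is briefly switched to atomic mode to get a reliable zero test;
      * sync_checkers keeps it atomic while several callers race here.
      * Returns the resulting ->in_sync value.
      */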
2637 static bool set_in_sync(struct mddev *mddev)
2638 {
2639         lockdep_assert_held(&mddev->lock);
2640         if (!mddev->in_sync) {
2641                 mddev->sync_checkers++;
2642                 spin_unlock(&mddev->lock);
2643                 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2644                 spin_lock(&mddev->lock);
2645                 if (!mddev->in_sync &&
2646                     percpu_ref_is_zero(&mddev->writes_pending)) {
2647                         mddev->in_sync = 1;
2648                         /*
2649                          * Ensure ->in_sync is visible before we clear
2650                          * ->sync_checkers.
2651                          */
2652                         smp_mb();
2653                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2654                         sysfs_notify_dirent_safe(mddev->sysfs_state);
2655                 }
2656                 if (--mddev->sync_checkers == 0)
2657                         percpu_ref_switch_to_percpu(&mddev->writes_pending);
2658         }
2659         if (mddev->safemode == 1)
2660                 mddev->safemode = 0;
2661         return mddev->in_sync;
2662 }
2663
2664 static void sync_sbs(struct mddev *mddev, int nospares)
2665 {
2666         /* Update each superblock (in-memory image), but
2667          * if we are allowed to, skip spares which already
2668          * have the right event counter, or have one earlier
2669          * (which would mean they aren't being marked as dirty
2670          * with the rest of the array)
2671          */
2672         struct md_rdev *rdev;
2673         rdev_for_each(rdev, mddev) {
2674                 if (rdev->sb_events == mddev->events ||
2675                     (nospares &&
2676                      rdev->raid_disk < 0 &&
2677                      rdev->sb_events+1 == mddev->events)) {
2678                         /* Don't update this superblock */
2679                         rdev->sb_loaded = 2;
2680                 } else {
2681                         sync_super(mddev, rdev);
2682                         rdev->sb_loaded = 1;
2683                 }
2684         }
2685 }
2686
2687 static bool does_sb_need_changing(struct mddev *mddev)
2688 {
2689         struct md_rdev *rdev = NULL, *iter;
2690         struct mdp_superblock_1 *sb;
2691         int role;
2692
2693         /* Find a good rdev */
2694         rdev_for_each(iter, mddev)
2695                 if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
2696                         rdev = iter;
2697                         break;
2698                 }
2699
2700         /* No good device found. */
2701         if (!rdev)
2702                 return false;
2703
2704         sb = page_address(rdev->sb_page);
2705         /* Check if a device has become faulty or a spare become active */
2706         rdev_for_each(rdev, mddev) {
2707                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
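                     /* 0xffff is MD_DISK_ROLE_SPARE; values below 0xfffd
                      * (MD_DISK_ROLE_JOURNAL) are real raid slot numbers.
                      */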
2708                 /* Device activated? */
2709                 if (role == 0xffff && rdev->raid_disk >=0 &&
2710                     !test_bit(Faulty, &rdev->flags))
2711                         return true;
2712                 /* Device turned faulty? */
2713                 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2714                         return true;
2715         }
2716
2717         /* Check if any mddev parameters have changed */
2718         if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2719             (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2720             (mddev->layout != le32_to_cpu(sb->layout)) ||
2721             (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2722             (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2723                 return true;
2724
2725         return false;
2726 }
2727
2728 void md_update_sb(struct mddev *mddev, int force_change)
2729 {
2730         struct md_rdev *rdev;
2731         int sync_req;
2732         int nospares = 0;
2733         int any_badblocks_changed = 0;
2734         int ret = -1;
2735
2736         if (mddev->ro) {
2737                 if (force_change)
2738                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2739                 return;
2740         }
2741
2742 repeat:
2743         if (mddev_is_clustered(mddev)) {
2744                 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2745                         force_change = 1;
2746                 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2747                         nospares = 1;
2748                 ret = md_cluster_ops->metadata_update_start(mddev);
2749                 /* Has someone else updated the sb? */
2750                 if (!does_sb_need_changing(mddev)) {
2751                         if (ret == 0)
2752                                 md_cluster_ops->metadata_update_cancel(mddev);
2753                         bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2754                                                          BIT(MD_SB_CHANGE_DEVS) |
2755                                                          BIT(MD_SB_CHANGE_CLEAN));
2756                         return;
2757                 }
2758         }
2759
2760         /*
2761          * First make sure individual recovery_offsets are correct.
2762          * curr_resync_completed can only be used during recovery.
2763          * During reshape/resync it might use array addresses rather
2764          * than device addresses.
2765          */
2766         rdev_for_each(rdev, mddev) {
2767                 if (rdev->raid_disk >= 0 &&
2768                     mddev->delta_disks >= 0 &&
2769                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2770                     test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2771                     !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2772                     !test_bit(Journal, &rdev->flags) &&
2773                     !test_bit(In_sync, &rdev->flags) &&
2774                     mddev->curr_resync_completed > rdev->recovery_offset)
2775                                 rdev->recovery_offset = mddev->curr_resync_completed;
2776
2777         }
2778         if (!mddev->persistent) {
2779                 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2780                 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2781                 if (!mddev->external) {
2782                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2783                         rdev_for_each(rdev, mddev) {
2784                                 if (rdev->badblocks.changed) {
2785                                         rdev->badblocks.changed = 0;
2786                                         ack_all_badblocks(&rdev->badblocks);
2787                                         md_error(mddev, rdev);
2788                                 }
2789                                 clear_bit(Blocked, &rdev->flags);
2790                                 clear_bit(BlockedBadBlocks, &rdev->flags);
2791                                 wake_up(&rdev->blocked_wait);
2792                         }
2793                 }
2794                 wake_up(&mddev->sb_wait);
2795                 return;
2796         }
2797
2798         spin_lock(&mddev->lock);
2799
2800         mddev->utime = ktime_get_real_seconds();
2801
2802         if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2803                 force_change = 1;
2804         if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2805                 /* just a clean <-> dirty transition; possibly leave spares alone,
2806                  * though if 'events' isn't the right even/odd value, we will
2807                  * have to update the spares after all
2808                  */
2809                 nospares = 1;
2810         if (force_change)
2811                 nospares = 0;
2812         if (mddev->degraded)
2813                 /* If the array is degraded, then skipping spares is both
2814                  * dangerous and fairly pointless.
2815                  * Dangerous because a device that was removed from the array
2816                  * might have an event_count that still looks up-to-date,
2817                  * so it can be re-added without a resync.
2818                  * Pointless because if there are any spares to skip,
2819                  * then a recovery will happen and soon that array won't
2820                  * be degraded any more and the spare can go back to sleep then.
2821                  */
2822                 nospares = 0;
2823
2824         sync_req = mddev->in_sync;
2825
2826         /* If this is just a dirty<->clean transition, and the array is clean
2827          * and 'events' is odd, we can roll back to the previous clean state */
2828         if (nospares
2829             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2830             && mddev->can_decrease_events
2831             && mddev->events != 1) {
2832                 mddev->events--;
2833                 mddev->can_decrease_events = 0;
2834         } else {
2835                 /* otherwise we have to go forward and ... */
2836                 mddev->events++;
2837                 mddev->can_decrease_events = nospares;
2838         }
2839
2840         /*
2841          * This 64-bit counter should never wrap.
2842          * Either we are somewhere around the year 1 trillion A.D., assuming
2843          * 1 reboot per second, or we have a bug...
2844          */
2845         WARN_ON(mddev->events == 0);
2846
2847         rdev_for_each(rdev, mddev) {
2848                 if (rdev->badblocks.changed)
2849                         any_badblocks_changed++;
2850                 if (test_bit(Faulty, &rdev->flags))
2851                         set_bit(FaultRecorded, &rdev->flags);
2852         }
2853
2854         sync_sbs(mddev, nospares);
2855         spin_unlock(&mddev->lock);
2856
2857         pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2858                  mdname(mddev), mddev->in_sync);
2859
2860         if (mddev->queue)
2861                 blk_add_trace_msg(mddev->queue, "md md_update_sb");
2862 rewrite:
2863         md_bitmap_update_sb(mddev->bitmap);
2864         rdev_for_each(rdev, mddev) {
2865                 char b[BDEVNAME_SIZE];
2866
2867                 if (rdev->sb_loaded != 1)
2868                         continue; /* no noise on spare devices */
2869
2870                 if (!test_bit(Faulty, &rdev->flags)) {
2871                         md_super_write(mddev, rdev,
2872                                        rdev->sb_start, rdev->sb_size,
2873                                        rdev->sb_page);
2874                         pr_debug("md: (write) %s's sb offset: %llu\n",
2875                                  bdevname(rdev->bdev, b),
2876                                  (unsigned long long)rdev->sb_start);
2877                         rdev->sb_events = mddev->events;
2878                         if (rdev->badblocks.size) {
2879                                 md_super_write(mddev, rdev,
2880                                                rdev->badblocks.sector,
2881                                                rdev->badblocks.size << 9,
2882                                                rdev->bb_page);
2883                                 rdev->badblocks.size = 0;
2884                         }
2885
2886                 } else
2887                         pr_debug("md: %s (skipping faulty)\n",
2888                                  bdevname(rdev->bdev, b));
2889
2890                 if (mddev->level == LEVEL_MULTIPATH)
2891                         /* only need to write one superblock... */
2892                         break;
2893         }
2894         if (md_super_wait(mddev) < 0)
2895                 goto rewrite;
2896         /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
2897
2898         if (mddev_is_clustered(mddev) && ret == 0)
2899                 md_cluster_ops->metadata_update_finish(mddev);
2900
2901         if (mddev->in_sync != sync_req ||
2902             !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2903                                BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
2904                 /* have to write it out again */
2905                 goto repeat;
2906         wake_up(&mddev->sb_wait);
2907         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2908                 sysfs_notify_dirent_safe(mddev->sysfs_completed);
2909
2910         rdev_for_each(rdev, mddev) {
2911                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2912                         clear_bit(Blocked, &rdev->flags);
2913
2914                 if (any_badblocks_changed)
2915                         ack_all_badblocks(&rdev->badblocks);
2916                 clear_bit(BlockedBadBlocks, &rdev->flags);
2917                 wake_up(&rdev->blocked_wait);
2918         }
2919 }
2920 EXPORT_SYMBOL(md_update_sb);
2921
2922 static int add_bound_rdev(struct md_rdev *rdev)
2923 {
2924         struct mddev *mddev = rdev->mddev;
2925         int err = 0;
2926         bool add_journal = test_bit(Journal, &rdev->flags);
2927
2928         if (!mddev->pers->hot_remove_disk || add_journal) {
2929                 /* If there is hot_add_disk but no hot_remove_disk
2930                  * then added disks are for geometry changes,
2931                  * and should be added immediately.
2932                  */
2933                 super_types[mddev->major_version].
2934                         validate_super(mddev, NULL/*freshest*/, rdev);
2935                 if (add_journal)
2936                         mddev_suspend(mddev);
2937                 err = mddev->pers->hot_add_disk(mddev, rdev);
2938                 if (add_journal)
2939                         mddev_resume(mddev);
2940                 if (err) {
2941                         md_kick_rdev_from_array(rdev);
2942                         return err;
2943                 }
2944         }
2945         sysfs_notify_dirent_safe(rdev->sysfs_state);
2946
2947         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2948         if (mddev->degraded)
2949                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2950         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2951         md_new_event(mddev);
2952         md_wakeup_thread(mddev->thread);
2953         return 0;
2954 }
2955
2956 /* words written to sysfs files may, or may not, be \n terminated.
2957  * We want to accept either case.  For this we use cmd_match.
2958  */
2959 static int cmd_match(const char *cmd, const char *str)
2960 {
2961         /* See if cmd, written into a sysfs file, matches
2962          * str.  They must either be the same, or cmd can
2963          * have a trailing newline
2964          */
2965         while (*cmd && *str && *cmd == *str) {
2966                 cmd++;
2967                 str++;
2968         }
2969         if (*cmd == '\n')
2970                 cmd++;
2971         if (*str || *cmd)
2972                 return 0;
2973         return 1;
2974 }
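/*
 * For example, cmd_match("faulty\n", "faulty") and cmd_match("faulty",
 * "faulty") both return 1, while cmd_match("fault", "faulty") and
 * cmd_match("faulty1", "faulty") return 0.
 */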
2975
2976 struct rdev_sysfs_entry {
2977         struct attribute attr;
2978         ssize_t (*show)(struct md_rdev *, char *);
2979         ssize_t (*store)(struct md_rdev *, const char *, size_t);
2980 };
2981
2982 static ssize_t
2983 state_show(struct md_rdev *rdev, char *page)
2984 {
2985         char *sep = ",";
2986         size_t len = 0;
2987         unsigned long flags = READ_ONCE(rdev->flags);
2988
2989         if (test_bit(Faulty, &flags) ||
2990             (!test_bit(ExternalBbl, &flags) &&
2991             rdev->badblocks.unacked_exist))
2992                 len += sprintf(page+len, "faulty%s", sep);
2993         if (test_bit(In_sync, &flags))
2994                 len += sprintf(page+len, "in_sync%s", sep);
2995         if (test_bit(Journal, &flags))
2996                 len += sprintf(page+len, "journal%s", sep);
2997         if (test_bit(WriteMostly, &flags))
2998                 len += sprintf(page+len, "write_mostly%s", sep);
2999         if (test_bit(Blocked, &flags) ||
3000             (rdev->badblocks.unacked_exist
3001              && !test_bit(Faulty, &flags)))
3002                 len += sprintf(page+len, "blocked%s", sep);
3003         if (!test_bit(Faulty, &flags) &&
3004             !test_bit(Journal, &flags) &&
3005             !test_bit(In_sync, &flags))
3006                 len += sprintf(page+len, "spare%s", sep);
3007         if (test_bit(WriteErrorSeen, &flags))
3008                 len += sprintf(page+len, "write_error%s", sep);
3009         if (test_bit(WantReplacement, &flags))
3010                 len += sprintf(page+len, "want_replacement%s", sep);
3011         if (test_bit(Replacement, &flags))
3012                 len += sprintf(page+len, "replacement%s", sep);
3013         if (test_bit(ExternalBbl, &flags))
3014                 len += sprintf(page+len, "external_bbl%s", sep);
3015         if (test_bit(FailFast, &flags))
3016                 len += sprintf(page+len, "failfast%s", sep);
3017
3018         if (len)
3019                 len -= strlen(sep);
3020
3021         return len+sprintf(page+len, "\n");
3022 }
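/*
 * state_show() emits a comma-separated list of the flags above, e.g. a
 * healthy active member typically reads "in_sync\n" while a write-mostly
 * spare would read "write_mostly,spare\n" (the exact set depends on the
 * rdev's flags).
 */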
3023
3024 static ssize_t
3025 state_store(struct md_rdev *rdev, const char *buf, size_t len)
3026 {
3027         /* can write
3028          *  faulty  - simulates an error
3029          *  remove  - disconnects the device
3030          *  writemostly - sets write_mostly
3031          *  -writemostly - clears write_mostly
3032          *  blocked - sets the Blocked flag
3033          *  -blocked - clears the Blocked flag and possibly simulates an error
3034          *  insync - sets In_sync provided the device isn't active
3035          *  -insync - clear Insync for a device with a slot assigned,
3036          *            so that it gets rebuilt based on bitmap
3037          *  write_error - sets WriteErrorSeen
3038          *  -write_error - clears WriteErrorSeen
3039          *  {,-}failfast - set/clear FailFast
3040          */
3041
3042         struct mddev *mddev = rdev->mddev;
3043         int err = -EINVAL;
3044         bool need_update_sb = false;
3045
3046         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
3047                 md_error(rdev->mddev, rdev);
3048                 if (test_bit(Faulty, &rdev->flags))
3049                         err = 0;
3050                 else
3051                         err = -EBUSY;
3052         } else if (cmd_match(buf, "remove")) {
3053                 if (rdev->mddev->pers) {
3054                         clear_bit(Blocked, &rdev->flags);
3055                         remove_and_add_spares(rdev->mddev, rdev);
3056                 }
3057                 if (rdev->raid_disk >= 0)
3058                         err = -EBUSY;
3059                 else {
3060                         err = 0;
3061                         if (mddev_is_clustered(mddev))
3062                                 err = md_cluster_ops->remove_disk(mddev, rdev);
3063
3064                         if (err == 0) {
3065                                 md_kick_rdev_from_array(rdev);
3066                                 if (mddev->pers) {
3067                                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
3068                                         md_wakeup_thread(mddev->thread);
3069                                 }
3070                                 md_new_event(mddev);
3071                         }
3072                 }
3073         } else if (cmd_match(buf, "writemostly")) {
3074                 set_bit(WriteMostly, &rdev->flags);
3075                 mddev_create_serial_pool(rdev->mddev, rdev, false);
3076                 need_update_sb = true;
3077                 err = 0;
3078         } else if (cmd_match(buf, "-writemostly")) {
3079                 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
3080                 clear_bit(WriteMostly, &rdev->flags);
3081                 need_update_sb = true;
3082                 err = 0;
3083         } else if (cmd_match(buf, "blocked")) {
3084                 set_bit(Blocked, &rdev->flags);
3085                 err = 0;
3086         } else if (cmd_match(buf, "-blocked")) {
3087                 if (!test_bit(Faulty, &rdev->flags) &&
3088                     !test_bit(ExternalBbl, &rdev->flags) &&
3089                     rdev->badblocks.unacked_exist) {
3090                         /* metadata handler doesn't understand badblocks,
3091                          * so we need to fail the device
3092                          */
3093                         md_error(rdev->mddev, rdev);
3094                 }
3095                 clear_bit(Blocked, &rdev->flags);
3096                 clear_bit(BlockedBadBlocks, &rdev->flags);
3097                 wake_up(&rdev->blocked_wait);
3098                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3099                 md_wakeup_thread(rdev->mddev->thread);
3100
3101                 err = 0;
3102         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3103                 set_bit(In_sync, &rdev->flags);
3104                 err = 0;
3105         } else if (cmd_match(buf, "failfast")) {
3106                 set_bit(FailFast, &rdev->flags);
3107                 need_update_sb = true;
3108                 err = 0;
3109         } else if (cmd_match(buf, "-failfast")) {
3110                 clear_bit(FailFast, &rdev->flags);
3111                 need_update_sb = true;
3112                 err = 0;
3113         } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3114                    !test_bit(Journal, &rdev->flags)) {
3115                 if (rdev->mddev->pers == NULL) {
3116                         clear_bit(In_sync, &rdev->flags);
3117                         rdev->saved_raid_disk = rdev->raid_disk;
3118                         rdev->raid_disk = -1;
3119                         err = 0;
3120                 }
3121         } else if (cmd_match(buf, "write_error")) {
3122                 set_bit(WriteErrorSeen, &rdev->flags);
3123                 err = 0;
3124         } else if (cmd_match(buf, "-write_error")) {
3125                 clear_bit(WriteErrorSeen, &rdev->flags);
3126                 err = 0;
3127         } else if (cmd_match(buf, "want_replacement")) {
3128                 /* Any non-spare device that is not a replacement can
3129                  * become want_replacement at any time, but we then need to
3130                  * check if recovery is needed.
3131                  */
3132                 if (rdev->raid_disk >= 0 &&
3133                     !test_bit(Journal, &rdev->flags) &&
3134                     !test_bit(Replacement, &rdev->flags))
3135                         set_bit(WantReplacement, &rdev->flags);
3136                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3137                 md_wakeup_thread(rdev->mddev->thread);
3138                 err = 0;
3139         } else if (cmd_match(buf, "-want_replacement")) {
3140                 /* Clearing 'want_replacement' is always allowed.
3141                  * Once replacement starts it is too late, though.
3142                  */
3143                 err = 0;
3144                 clear_bit(WantReplacement, &rdev->flags);
3145         } else if (cmd_match(buf, "replacement")) {
3146                 /* Can only set a device as a replacement when array has not
3147                  * yet been started.  Once running, replacement is automatic
3148                  * from spares, or by assigning 'slot'.
3149                  */
3150                 if (rdev->mddev->pers)
3151                         err = -EBUSY;
3152                 else {
3153                         set_bit(Replacement, &rdev->flags);
3154                         err = 0;
3155                 }
3156         } else if (cmd_match(buf, "-replacement")) {
3157                 /* Similarly, can only clear Replacement before start */
3158                 if (rdev->mddev->pers)
3159                         err = -EBUSY;
3160                 else {
3161                         clear_bit(Replacement, &rdev->flags);
3162                         err = 0;
3163                 }
3164         } else if (cmd_match(buf, "re-add")) {
3165                 if (!rdev->mddev->pers)
3166                         err = -EINVAL;
3167                 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3168                                 rdev->saved_raid_disk >= 0) {
3169                         /* clear_bit is performed _after_ all the devices
3170                          * have their local Faulty bit cleared. If any writes
3171                          * happen in the meantime in the local node, they
3172                          * will land in the local bitmap, which will be synced
3173                          * by this node eventually
3174                          */
3175                         if (!mddev_is_clustered(rdev->mddev) ||
3176                             (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3177                                 clear_bit(Faulty, &rdev->flags);
3178                                 err = add_bound_rdev(rdev);
3179                         }
3180                 } else
3181                         err = -EBUSY;
3182         } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3183                 set_bit(ExternalBbl, &rdev->flags);
3184                 rdev->badblocks.shift = 0;
3185                 err = 0;
3186         } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3187                 clear_bit(ExternalBbl, &rdev->flags);
3188                 err = 0;
3189         }
3190         if (need_update_sb)
3191                 md_update_sb(mddev, 1);
3192         if (!err)
3193                 sysfs_notify_dirent_safe(rdev->sysfs_state);
3194         return err ? err : len;
3195 }
3196 static struct rdev_sysfs_entry rdev_state =
3197 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
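/*
 * The "state" attribute appears once per member device, typically as
 * /sys/block/mdX/md/dev-YYY/state.  Illustrative uses (device names are
 * examples only):
 *   echo want_replacement > /sys/block/md0/md/dev-sdb1/state
 * asks for the member to be rebuilt onto a spare, and
 *   echo -blocked > /sys/block/md0/md/dev-sdb1/state
 * clears a previously set Blocked flag.
 */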
3198
3199 static ssize_t
3200 errors_show(struct md_rdev *rdev, char *page)
3201 {
3202         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3203 }
3204
3205 static ssize_t
3206 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
3207 {
3208         unsigned int n;
3209         int rv;
3210
3211         rv = kstrtouint(buf, 10, &n);
3212         if (rv < 0)
3213                 return rv;
3214         atomic_set(&rdev->corrected_errors, n);
3215         return len;
3216 }
3217 static struct rdev_sysfs_entry rdev_errors =
3218 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
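/*
 * Writing a number sets the corrected-read-error counter directly, e.g.
 * "echo 0 > /sys/block/md0/md/dev-sdb1/errors" (illustrative path) clears it.
 */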
3219
3220 static ssize_t
3221 slot_show(struct md_rdev *rdev, char *page)
3222 {
3223         if (test_bit(Journal, &rdev->flags))
3224                 return sprintf(page, "journal\n");
3225         else if (rdev->raid_disk < 0)
3226                 return sprintf(page, "none\n");
3227         else
3228                 return sprintf(page, "%d\n", rdev->raid_disk);
3229 }
3230
3231 static ssize_t
3232 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
3233 {
3234         int slot;
3235         int err;
3236
3237         if (test_bit(Journal, &rdev->flags))
3238                 return -EBUSY;
3239         if (strncmp(buf, "none", 4)==0)
3240                 slot = -1;
3241         else {
3242                 err = kstrtouint(buf, 10, (unsigned int *)&slot);
3243                 if (err < 0)
3244                         return err;
3245                 if (slot < 0)
3246                         /* overflow */
3247                         return -ENOSPC;
3248         }
3249         if (rdev->mddev->pers && slot == -1) {
3250                 /* Setting 'slot' on an active array requires also
3251                  * updating the 'rd%d' link, and communicating
3252                  * with the personality with ->hot_*_disk.
3253                  * For now we only support removing
3254                  * failed/spare devices.  This normally happens automatically,
3255                  * but not when the metadata is externally managed.
3256                  */
3257                 if (rdev->raid_disk == -1)
3258                         return -EEXIST;
3259                 /* personality does all needed checks */
3260                 if (rdev->mddev->pers->hot_remove_disk == NULL)
3261                         return -EINVAL;
3262                 clear_bit(Blocked, &rdev->flags);
3263                 remove_and_add_spares(rdev->mddev, rdev);
3264                 if (rdev->raid_disk >= 0)
3265                         return -EBUSY;
3266                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3267                 md_wakeup_thread(rdev->mddev->thread);
3268         } else if (rdev->mddev->pers) {
3269                 /* Activating a spare .. or possibly reactivating
3270                  * if we ever get bitmaps working here.
3271                  */
3272                 int err;
3273
3274                 if (rdev->raid_disk != -1)
3275                         return -EBUSY;
3276
3277                 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3278                         return -EBUSY;
3279
3280                 if (rdev->mddev->pers->hot_add_disk == NULL)
3281                         return -EINVAL;
3282
3283                 if (slot >= rdev->mddev->raid_disks &&
3284                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3285                         return -ENOSPC;
3286
3287                 rdev->raid_disk = slot;
3288                 if (test_bit(In_sync, &rdev->flags))
3289                         rdev->saved_raid_disk = slot;
3290                 else
3291                         rdev->saved_raid_disk = -1;
3292                 clear_bit(In_sync, &rdev->flags);
3293                 clear_bit(Bitmap_sync, &rdev->flags);
3294                 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
3295                 if (err) {
3296                         rdev->raid_disk = -1;
3297                         return err;
3298                 } else
3299                         sysfs_notify_dirent_safe(rdev->sysfs_state);
3300                 /* failure here is OK */
3301                 sysfs_link_rdev(rdev->mddev, rdev);
3302                 /* don't wakeup anyone, leave that to userspace. */
3303         } else {
3304                 if (slot >= rdev->mddev->raid_disks &&
3305                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3306                         return -ENOSPC;
3307                 rdev->raid_disk = slot;
3308                 /* assume it is working */
3309                 clear_bit(Faulty, &rdev->flags);
3310                 clear_bit(WriteMostly, &rdev->flags);
3311                 set_bit(In_sync, &rdev->flags);
3312                 sysfs_notify_dirent_safe(rdev->sysfs_state);
3313         }
3314         return len;
3315 }
3316
3317 static struct rdev_sysfs_entry rdev_slot =
3318 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
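/*
 * Illustrative uses of the "slot" attribute (paths are examples only): on an
 * inactive array "echo 2 > .../dev-sdb1/slot" pre-assigns raid slot 2, while
 * on a running array "echo none > .../dev-sdb1/slot" detaches a failed or
 * spare device from its slot; see slot_store() above for the exact rules.
 */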
3319
3320 static ssize_t
3321 offset_show(struct md_rdev *rdev, char *page)
3322 {
3323         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
3324 }
3325
3326 static ssize_t
3327 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
3328 {
3329         unsigned long long offset;
3330         if (kstrtoull(buf, 10, &offset) < 0)
3331                 return -EINVAL;
3332         if (rdev->mddev->pers && rdev->raid_disk >= 0)
3333                 return -EBUSY;
3334         if (rdev->sectors && rdev->mddev->external)
3335                 /* Must set offset before size, so overlap checks
3336                  * can be sane */
3337                 return -EBUSY;
3338         rdev->data_offset = offset;
3339         rdev->new_data_offset = offset;
3340         return len;
3341 }
3342
3343 static struct rdev_sysfs_entry rdev_offset =
3344 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
3345
3346 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3347 {
3348         return sprintf(page, "%llu\n",
3349                        (unsigned long long)rdev->new_data_offset);
3350 }
3351
3352 static ssize_t new_offset_store(struct md_rdev *rdev,
3353                                 const char *buf, size_t len)
3354 {
3355         unsigned long long new_offset;
3356         struct mddev *mddev = rdev->mddev;
3357
3358         if (kstrtoull(buf, 10, &new_offset) < 0)
3359                 return -EINVAL;
3360
3361         if (mddev->sync_thread ||
3362             test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
3363                 return -EBUSY;
3364         if (new_offset == rdev->data_offset)
3365                 /* reset is always permitted */
3366                 ;
3367         else if (new_offset > rdev->data_offset) {
3368                 /* must not push array size beyond rdev_sectors */
3369                 if (new_offset - rdev->data_offset
3370                     + mddev->dev_sectors > rdev->sectors)
3371                                 return -E2BIG;
3372         }
3373         /* Metadata worries about other space details. */
3374
3375         /* decreasing the offset is inconsistent with a backwards
3376          * reshape.
3377          */
3378         if (new_offset < rdev->data_offset &&
3379             mddev->reshape_backwards)
3380                 return -EINVAL;
3381         /* Increasing offset is inconsistent with forwards
3382          * reshape.  reshape_direction should be set to
3383          * 'backwards' first.
3384          */
3385         if (new_offset > rdev->data_offset &&
3386             !mddev->reshape_backwards)
3387                 return -EINVAL;
3388
3389         if (mddev->pers && mddev->persistent &&
3390             !super_types[mddev->major_version]
3391             .allow_new_offset(rdev, new_offset))
3392                 return -E2BIG;
3393         rdev->new_data_offset = new_offset;
3394         if (new_offset > rdev->data_offset)
3395                 mddev->reshape_backwards = 1;
3396         else if (new_offset < rdev->data_offset)
3397                 mddev->reshape_backwards = 0;
3398
3399         return len;
3400 }
3401 static struct rdev_sysfs_entry rdev_new_offset =
3402 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3403
3404 static ssize_t
3405 rdev_size_show(struct md_rdev *rdev, char *page)
3406 {
3407         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
3408 }
3409
3410 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
3411 {
3412         /* check if two start/length pairs overlap */
3413         if (s1+l1 <= s2)
3414                 return 0;
3415         if (s2+l2 <= s1)
3416                 return 0;
3417         return 1;
3418 }
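/*
 * e.g. overlaps(0, 100, 50, 100) returns 1 (the ranges intersect), while
 * overlaps(0, 100, 100, 50) returns 0 (they only touch end-to-start).
 */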
3419
3420 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3421 {
3422         unsigned long long blocks;
3423         sector_t new;
3424
3425         if (kstrtoull(buf, 10, &blocks) < 0)
3426                 return -EINVAL;
3427
3428         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3429                 return -EINVAL; /* sector conversion overflow */
3430
3431         new = blocks * 2;
3432         if (new != blocks * 2)
3433                 return -EINVAL; /* unsigned long long to sector_t overflow */
3434
3435         *sectors = new;
3436         return 0;
3437 }
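/*
 * The sizes written here are in 1K blocks; strict_blocks_to_sectors()
 * doubles them into 512-byte sectors, so e.g. "1048576" (1 GiB expressed
 * in KiB) becomes 2097152 sectors, and values that would overflow the
 * conversion are rejected with -EINVAL.
 */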
3438
3439 static ssize_t
3440 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3441 {
3442         struct mddev *my_mddev = rdev->mddev;
3443         sector_t oldsectors = rdev->sectors;
3444         sector_t sectors;
3445
3446         if (test_bit(Journal, &rdev->flags))
3447                 return -EBUSY;
3448         if (strict_blocks_to_sectors(buf, &sectors) < 0)
3449                 return -EINVAL;
3450         if (rdev->data_offset != rdev->new_data_offset)
3451                 return -EINVAL; /* too confusing */
3452         if (my_mddev->pers && rdev->raid_disk >= 0) {
3453                 if (my_mddev->persistent) {
3454                         sectors = super_types[my_mddev->major_version].
3455                                 rdev_size_change(rdev, sectors);
3456                         if (!sectors)
3457                                 return -EBUSY;
3458                 } else if (!sectors)
3459                         sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
3460                                 rdev->data_offset;
3461                 if (!my_mddev->pers->resize)
3462                         /* Cannot change size for RAID0 or Linear etc */
3463                         return -EINVAL;
3464         }
3465         if (sectors < my_mddev->dev_sectors)
3466                 return -EINVAL; /* component must fit device */
3467
3468         rdev->sectors = sectors;
3469         if (sectors > oldsectors && my_mddev->external) {
3470                 /* Need to check that all other rdevs with the same
3471                  * ->bdev do not overlap.  'rcu' is sufficient to walk
3472                  * the rdev lists safely.
3473                  * This check does not provide a hard guarantee, it
3474                  * just helps avoid dangerous mistakes.
3475                  */
3476                 struct mddev *mddev;
3477                 int overlap = 0;
3478                 struct list_head *tmp;
3479
3480                 rcu_read_lock();
3481                 for_each_mddev(mddev, tmp) {
3482                         struct md_rdev *rdev2;
3483
3484                         rdev_for_each(rdev2, mddev)
3485                                 if (rdev->bdev == rdev2->bdev &&
3486                                     rdev != rdev2 &&
3487                                     overlaps(rdev->data_offset, rdev->sectors,
3488                                              rdev2->data_offset,
3489                                              rdev2->sectors)) {
3490                                         overlap = 1;
3491                                         break;
3492                                 }
3493                         if (overlap) {
3494                                 mddev_put(mddev);
3495                                 break;
3496                         }
3497                 }
3498                 rcu_read_unlock();
3499                 if (overlap) {
3500                         /* Someone else could have slipped in a size
3501                          * change here, but doing so is just silly.
3502                          * We put oldsectors back because we *know* it is
3503                          * safe, and trust userspace not to race with
3504                          * itself
3505                          */
3506                         rdev->sectors = oldsectors;
3507                         return -EBUSY;
3508                 }
3509         }
3510         return len;
3511 }
3512
3513 static struct rdev_sysfs_entry rdev_size =
3514 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3515
3516 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3517 {
3518         unsigned long long recovery_start = rdev->recovery_offset;
3519
3520         if (test_bit(In_sync, &rdev->flags) ||
3521             recovery_start == MaxSector)
3522                 return sprintf(page, "none\n");
3523
3524         return sprintf(page, "%llu\n", recovery_start);
3525 }
3526
3527 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3528 {
3529         unsigned long long recovery_start;
3530
3531         if (cmd_match(buf, "none"))
3532                 recovery_start = MaxSector;
3533         else if (kstrtoull(buf, 10, &recovery_start))
3534                 return -EINVAL;
3535
3536         if (rdev->mddev->pers &&
3537             rdev->raid_disk >= 0)
3538                 return -EBUSY;
3539
3540         rdev->recovery_offset = recovery_start;
3541         if (recovery_start == MaxSector)
3542                 set_bit(In_sync, &rdev->flags);
3543         else
3544                 clear_bit(In_sync, &rdev->flags);
3545         return len;
3546 }
3547
3548 static struct rdev_sysfs_entry rdev_recovery_start =
3549 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
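/*
 * Illustrative uses: "echo none > .../dev-sdb1/recovery_start" marks the
 * device fully recovered (In_sync), while writing a sector number records
 * how far recovery has progressed on a partially recovered device.
 */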
3550
3551 /* sysfs access to bad-blocks list.
3552  * We present two files.
3553  * 'bad-blocks' lists sector numbers and lengths of ranges that
3554  *    are recorded as bad.  The list is truncated to fit within
3555  *    the one-page limit of sysfs.
3556  *    Writing "sector length" to this file adds an acknowledged
3557  *    bad block list.
3558  * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
3559  *    been acknowledged.  Writing to this file adds bad blocks
3560  *    without acknowledging them.  This is largely for testing.
3561  */
3562 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3563 {
3564         return badblocks_show(&rdev->badblocks, page, 0);
3565 }
3566 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3567 {
3568         int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3569         /* Maybe that ack was all we needed */
3570         if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3571                 wake_up(&rdev->blocked_wait);
3572         return rv;
3573 }
3574 static struct rdev_sysfs_entry rdev_bad_blocks =
3575 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3576
3577 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3578 {
3579         return badblocks_show(&rdev->badblocks, page, 1);
3580 }
3581 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3582 {
3583         return badblocks_store(&rdev->badblocks, page, len, 1);
3584 }
3585 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3586 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
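/*
 * Example (illustrative path): "echo 2048 16 > .../dev-sdb1/bad_blocks"
 * records sectors 2048-2063 as bad and acknowledged; the same string written
 * to unacknowledged_bad_blocks records them without the acknowledgement.
 */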
3587
3588 static ssize_t
3589 ppl_sector_show(struct md_rdev *rdev, char *page)
3590 {
3591         return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3592 }
3593
3594 static ssize_t
3595 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3596 {
3597         unsigned long long sector;
3598
3599         if (kstrtoull(buf, 10, &sector) < 0)
3600                 return -EINVAL;
3601         if (sector != (sector_t)sector)
3602                 return -EINVAL;
3603
3604         if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3605             rdev->raid_disk >= 0)
3606                 return -EBUSY;
3607
3608         if (rdev->mddev->persistent) {
3609                 if (rdev->mddev->major_version == 0)
3610                         return -EINVAL;
3611                 if ((sector > rdev->sb_start &&
3612                      sector - rdev->sb_start > S16_MAX) ||
3613                     (sector < rdev->sb_start &&
3614                      rdev->sb_start - sector > -S16_MIN))
3615                         return -EINVAL;
3616                 rdev->ppl.offset = sector - rdev->sb_start;
3617         } else if (!rdev->mddev->external) {
3618                 return -EBUSY;
3619         }
3620         rdev->ppl.sector = sector;
3621         return len;
3622 }
3623
3624 static struct rdev_sysfs_entry rdev_ppl_sector =
3625 __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3626
3627 static ssize_t
3628 ppl_size_show(struct md_rdev *rdev, char *page)
3629 {
3630         return sprintf(page, "%u\n", rdev->ppl.size);
3631 }
3632
3633 static ssize_t
3634 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3635 {
3636         unsigned int size;
3637
3638         if (kstrtouint(buf, 10, &size) < 0)
3639                 return -EINVAL;
3640
3641         if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3642             rdev->raid_disk >= 0)
3643                 return -EBUSY;
3644
3645         if (rdev->mddev->persistent) {
3646                 if (rdev->mddev->major_version == 0)
3647                         return -EINVAL;
3648                 if (size > U16_MAX)
3649                         return -EINVAL;
3650         } else if (!rdev->mddev->external) {
3651                 return -EBUSY;
3652         }
3653         rdev->ppl.size = size;
3654         return len;
3655 }
3656
3657 static struct rdev_sysfs_entry rdev_ppl_size =
3658 __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3659
3660 static struct attribute *rdev_default_attrs[] = {
3661         &rdev_state.attr,
3662         &rdev_errors.attr,
3663         &rdev_slot.attr,
3664         &rdev_offset.attr,
3665         &rdev_new_offset.attr,
3666         &rdev_size.attr,
3667         &rdev_recovery_start.attr,
3668         &rdev_bad_blocks.attr,
3669         &rdev_unack_bad_blocks.attr,
3670         &rdev_ppl_sector.attr,
3671         &rdev_ppl_size.attr,
3672         NULL,
3673 };
3674 static ssize_t
3675 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3676 {
3677         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3678         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3679
3680         if (!entry->show)
3681                 return -EIO;
3682         if (!rdev->mddev)
3683                 return -ENODEV;
3684         return entry->show(rdev, page);
3685 }
3686
3687 static ssize_t
3688 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3689               const char *page, size_t length)
3690 {
3691         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3692         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3693         ssize_t rv;
3694         struct mddev *mddev = rdev->mddev;
3695
3696         if (!entry->store)
3697                 return -EIO;
3698         if (!capable(CAP_SYS_ADMIN))
3699                 return -EACCES;
3700         rv = mddev ? mddev_lock(mddev) : -ENODEV;
3701         if (!rv) {
3702                 if (rdev->mddev == NULL)
3703                         rv = -ENODEV;
3704                 else
3705                         rv = entry->store(rdev, page, length);
3706                 mddev_unlock(mddev);
3707         }
3708         return rv;
3709 }
3710
3711 static void rdev_free(struct kobject *ko)
3712 {
3713         struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3714         kfree(rdev);
3715 }
3716 static const struct sysfs_ops rdev_sysfs_ops = {
3717         .show           = rdev_attr_show,
3718         .store          = rdev_attr_store,
3719 };
3720 static struct kobj_type rdev_ktype = {
3721         .release        = rdev_free,
3722         .sysfs_ops      = &rdev_sysfs_ops,
3723         .default_attrs  = rdev_default_attrs,
3724 };
3725
3726 int md_rdev_init(struct md_rdev *rdev)
3727 {
3728         rdev->desc_nr = -1;
3729         rdev->saved_raid_disk = -1;
3730         rdev->raid_disk = -1;
3731         rdev->flags = 0;
3732         rdev->data_offset = 0;
3733         rdev->new_data_offset = 0;
3734         rdev->sb_events = 0;
3735         rdev->last_read_error = 0;
3736         rdev->sb_loaded = 0;
3737         rdev->bb_page = NULL;
3738         atomic_set(&rdev->nr_pending, 0);
3739         atomic_set(&rdev->read_errors, 0);
3740         atomic_set(&rdev->corrected_errors, 0);
3741
3742         INIT_LIST_HEAD(&rdev->same_set);
3743         init_waitqueue_head(&rdev->blocked_wait);
3744
3745         /* Add space to store bad block list.
3746          * This reserves the space even on arrays where it cannot
3747          * be used - I wonder if that matters
3748          */
3749         return badblocks_init(&rdev->badblocks, 0);
3750 }
3751 EXPORT_SYMBOL_GPL(md_rdev_init);
3752 /*
3753  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3754  *
3755  * mark the device faulty if:
3756  *
3757  *   - the device is nonexistent (zero size)
3758  *   - the device has no valid superblock
3759  *
3760  * a faulty rdev _never_ has rdev->sb set.
3761  */
3762 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3763 {
3764         char b[BDEVNAME_SIZE];
3765         int err;
3766         struct md_rdev *rdev;
3767         sector_t size;
3768
3769         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3770         if (!rdev)
3771                 return ERR_PTR(-ENOMEM);
3772
3773         err = md_rdev_init(rdev);
3774         if (err)
3775                 goto abort_free;
3776         err = alloc_disk_sb(rdev);
3777         if (err)
3778                 goto abort_free;
3779
3780         err = lock_rdev(rdev, newdev, super_format == -2);
3781         if (err)
3782                 goto abort_free;
3783
3784         kobject_init(&rdev->kobj, &rdev_ktype);
3785
3786         size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3787         if (!size) {
3788                 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
3789                         bdevname(rdev->bdev,b));
3790                 err = -EINVAL;
3791                 goto abort_free;
3792         }
3793
3794         if (super_format >= 0) {
3795                 err = super_types[super_format].
3796                         load_super(rdev, NULL, super_minor);
3797                 if (err == -EINVAL) {
3798                         pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
3799                                 bdevname(rdev->bdev,b),
3800                                 super_format, super_minor);
3801                         goto abort_free;
3802                 }
3803                 if (err < 0) {
3804                         pr_warn("md: could not read %s's sb, not importing!\n",
3805                                 bdevname(rdev->bdev,b));
3806                         goto abort_free;
3807                 }
3808         }
3809
3810         return rdev;
3811
3812 abort_free:
3813         if (rdev->bdev)
3814                 unlock_rdev(rdev);
3815         md_rdev_clear(rdev);
3816         kfree(rdev);
3817         return ERR_PTR(err);
3818 }
3819
3820 /*
3821  * Check a full RAID array for plausibility
3822  */
3823
3824 static int analyze_sbs(struct mddev *mddev)
3825 {
3826         int i;
3827         struct md_rdev *rdev, *freshest, *tmp;
3828         char b[BDEVNAME_SIZE];
3829
3830         freshest = NULL;
3831         rdev_for_each_safe(rdev, tmp, mddev)
3832                 switch (super_types[mddev->major_version].
3833                         load_super(rdev, freshest, mddev->minor_version)) {
3834                 case 1:
3835                         freshest = rdev;
3836                         break;
3837                 case 0:
3838                         break;
3839                 default:
3840                         pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
3841                                 bdevname(rdev->bdev,b));
3842                         md_kick_rdev_from_array(rdev);
3843                 }
3844
3845         /* Cannot find a valid fresh disk */
3846         if (!freshest) {
3847                 pr_warn("md: cannot find a valid disk\n");
3848                 return -EINVAL;
3849         }
3850
3851         super_types[mddev->major_version].
3852                 validate_super(mddev, NULL/*freshest*/, freshest);
3853
3854         i = 0;
3855         rdev_for_each_safe(rdev, tmp, mddev) {
3856                 if (mddev->max_disks &&
3857                     (rdev->desc_nr >= mddev->max_disks ||
3858                      i > mddev->max_disks)) {
3859                         pr_warn("md: %s: %s: only %d devices permitted\n",
3860                                 mdname(mddev), bdevname(rdev->bdev, b),
3861                                 mddev->max_disks);
3862                         md_kick_rdev_from_array(rdev);
3863                         continue;
3864                 }
3865                 if (rdev != freshest) {
3866                         if (super_types[mddev->major_version].
3867                             validate_super(mddev, freshest, rdev)) {
3868                                 pr_warn("md: kicking non-fresh %s from array!\n",
3869                                         bdevname(rdev->bdev,b));
3870                                 md_kick_rdev_from_array(rdev);
3871                                 continue;
3872                         }
3873                 }
3874                 if (mddev->level == LEVEL_MULTIPATH) {
3875                         rdev->desc_nr = i++;
3876                         rdev->raid_disk = rdev->desc_nr;
3877                         set_bit(In_sync, &rdev->flags);
3878                 } else if (rdev->raid_disk >=
3879                             (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3880                            !test_bit(Journal, &rdev->flags)) {
3881                         rdev->raid_disk = -1;
3882                         clear_bit(In_sync, &rdev->flags);
3883                 }
3884         }
3885
3886         return 0;
3887 }
3888
3889 /* Read a fixed-point number.
3890  * Numbers in sysfs attributes should be in "standard" units where
3891  * possible, so time should be in seconds.
3892  * However we internally use a much smaller unit such as
3893  * milliseconds or jiffies.
3894  * This function takes a decimal number with a possible fractional
3895  * component, and produces an integer which is the result of
3896  * multiplying that number by 10^'scale', all without any
3897  * floating-point arithmetic.
3898  */
3899 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3900 {
3901         unsigned long result = 0;
3902         long decimals = -1;
3903         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3904                 if (*cp == '.')
3905                         decimals = 0;
3906                 else if (decimals < scale) {
3907                         unsigned int value;
3908                         value = *cp - '0';
3909                         result = result * 10 + value;
3910                         if (decimals >= 0)
3911                                 decimals++;
3912                 }
3913                 cp++;
3914         }
3915         if (*cp == '\n')
3916                 cp++;
3917         if (*cp)
3918                 return -EINVAL;
3919         if (decimals < 0)
3920                 decimals = 0;
3921         *res = result * int_pow(10, scale - decimals);
3922         return 0;
3923 }
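/*
 * Worked example: strict_strtoul_scaled("0.200", &res, 3) stores 200
 * (0.200 scaled by 10^3) and strict_strtoul_scaled("5", &res, 3) stores
 * 5000; fractional digits beyond 'scale' are silently dropped.
 */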
3924
3925 static ssize_t
3926 safe_delay_show(struct mddev *mddev, char *page)
3927 {
3928         unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ;
3929
3930         return sprintf(page, "%u.%03u\n", msec/1000, msec%1000);
3931 }
3932 static ssize_t
3933 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3934 {
3935         unsigned long msec;
3936
3937         if (mddev_is_clustered(mddev)) {
3938                 pr_warn("md: Safemode is disabled for clustered mode\n");
3939                 return -EINVAL;
3940         }
3941
3942         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ)
3943                 return -EINVAL;
3944         if (msec == 0)
3945                 mddev->safemode_delay = 0;
3946         else {
3947                 unsigned long old_delay = mddev->safemode_delay;
3948                 unsigned long new_delay = (msec*HZ)/1000;
3949
3950                 if (new_delay == 0)
3951                         new_delay = 1;
3952                 mddev->safemode_delay = new_delay;
3953                 if (new_delay < old_delay || old_delay == 0)
3954                         mod_timer(&mddev->safemode_timer, jiffies+1);
3955         }
3956         return len;
3957 }
3958 static struct md_sysfs_entry md_safe_delay =
3959 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);
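/*
 * e.g. "echo 0.200 > /sys/block/md0/md/safe_mode_delay" (illustrative path)
 * requests a ~200ms safemode delay, stored internally in jiffies; writing
 * "0" disables the delay entirely.
 */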
3960
3961 static ssize_t
3962 level_show(struct mddev *mddev, char *page)
3963 {
3964         struct md_personality *p;
3965         int ret;
3966         spin_lock(&mddev->lock);
3967         p = mddev->pers;
3968         if (p)
3969                 ret = sprintf(page, "%s\n", p->name);
3970         else if (mddev->clevel[0])
3971                 ret = sprintf(page, "%s\n", mddev->clevel);
3972         else if (mddev->level != LEVEL_NONE)
3973                 ret = sprintf(page, "%d\n", mddev->level);
3974         else
3975                 ret = 0;
3976         spin_unlock(&mddev->lock);
3977         return ret;
3978 }
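/*
 * The "level" attribute reads back the personality, e.g. "raid5\n".  On an
 * inactive array writing it merely records the requested level; on a running
 * array level_store() below attempts an online takeover, e.g.
 * "echo raid6 > /sys/block/md0/md/level" (illustrative), when the target
 * personality supports it.
 */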
3979
3980 static ssize_t
3981 level_store(struct mddev *mddev, const char *buf, size_t len)
3982 {
3983         char clevel[16];
3984         ssize_t rv;
3985         size_t slen = len;
3986         struct md_personality *pers, *oldpers;
3987         long level;
3988         void *priv, *oldpriv;
3989         struct md_rdev *rdev;
3990
3991         if (slen == 0 || slen >= sizeof(clevel))
3992                 return -EINVAL;
3993
3994         rv = mddev_lock(mddev);
3995         if (rv)
3996                 return rv;
3997
3998         if (mddev->pers == NULL) {
3999                 strncpy(mddev->clevel, buf, slen);
4000                 if (mddev->clevel[slen-1] == '\n')
4001                         slen--;
4002                 mddev->clevel[slen] = 0;
4003                 mddev->level = LEVEL_NONE;
4004                 rv = len;
4005                 goto out_unlock;
4006         }
4007         rv = -EROFS;
4008         if (mddev->ro)
4009                 goto out_unlock;
4010
4011         /* request to change the personality.  Need to ensure:
4012          *  - array is not engaged in resync/recovery/reshape
4013          *  - old personality can be suspended
4014          *  - new personality will access other array.
4015          */
4016
4017         rv = -EBUSY;
4018         if (mddev->sync_thread ||
4019             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
4020             mddev->reshape_position != MaxSector ||
4021             mddev->sysfs_active)
4022                 goto out_unlock;
4023
4024         rv = -EINVAL;
4025         if (!mddev->pers->quiesce) {
4026                 pr_warn("md: %s: %s does not support online personality change\n",
4027                         mdname(mddev), mddev->pers->name);
4028                 goto out_unlock;
4029         }
4030
4031         /* Now find the new personality */
4032         strncpy(clevel, buf, slen);
4033         if (clevel[slen-1] == '\n')
4034                 slen--;
4035         clevel[slen] = 0;
4036         if (kstrtol(clevel, 10, &level))
4037                 level = LEVEL_NONE;
4038
4039         if (request_module("md-%s", clevel) != 0)
4040                 request_module("md-level-%s", clevel);
4041         spin_lock(&pers_lock);
4042         pers = find_pers(level, clevel);
4043         if (!pers || !try_module_get(pers->owner)) {
4044                 spin_unlock(&pers_lock);
4045                 pr_warn("md: personality %s not loaded\n", clevel);
4046                 rv = -EINVAL;
4047                 goto out_unlock;
4048         }
4049         spin_unlock(&pers_lock);
4050
4051         if (pers == mddev->pers) {
4052                 /* Nothing to do! */
4053                 module_put(pers->owner);
4054                 rv = len;
4055                 goto out_unlock;
4056         }
4057         if (!pers->takeover) {
4058                 module_put(pers->owner);
4059                 pr_warn("md: %s: %s does not support personality takeover\n",
4060                         mdname(mddev), clevel);
4061                 rv = -EINVAL;
4062                 goto out_unlock;
4063         }
4064
4065         rdev_for_each(rdev, mddev)
4066                 rdev->new_raid_disk = rdev->raid_disk;
4067
4068         /* ->takeover must set new_* and/or delta_disks
4069          * if it succeeds, and may set them when it fails.
4070          */
4071         priv = pers->takeover(mddev);
4072         if (IS_ERR(priv)) {
4073                 mddev->new_level = mddev->level;
4074                 mddev->new_layout = mddev->layout;
4075                 mddev->new_chunk_sectors = mddev->chunk_sectors;
4076                 mddev->raid_disks -= mddev->delta_disks;
4077                 mddev->delta_disks = 0;
4078                 mddev->reshape_backwards = 0;
4079                 module_put(pers->owner);
4080                 pr_warn("md: %s: %s would not accept array\n",
4081                         mdname(mddev), clevel);
4082                 rv = PTR_ERR(priv);
4083                 goto out_unlock;
4084         }
4085
4086         /* Looks like we have a winner */
4087         mddev_suspend(mddev);
4088         mddev_detach(mddev);
4089
4090         spin_lock(&mddev->lock);
4091         oldpers = mddev->pers;
4092         oldpriv = mddev->private;
4093         mddev->pers = pers;
4094         mddev->private = priv;
4095         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4096         mddev->level = mddev->new_level;
4097         mddev->layout = mddev->new_layout;
4098         mddev->chunk_sectors = mddev->new_chunk_sectors;
4099         mddev->delta_disks = 0;
4100         mddev->reshape_backwards = 0;
4101         mddev->degraded = 0;
4102         spin_unlock(&mddev->lock);
4103
4104         if (oldpers->sync_request == NULL &&
4105             mddev->external) {
4106                 /* We are converting from an array with no redundancy
4107                  * to one with redundancy, and metadata is managed
4108                  * externally, so we need to be sure that writes
4109                  * won't block due to a need to transition
4110                  *      clean->dirty
4111                  * until external management is started.
4112                  */
4113                 mddev->in_sync = 0;
4114                 mddev->safemode_delay = 0;
4115                 mddev->safemode = 0;
4116         }
4117
4118         oldpers->free(mddev, oldpriv);
4119
4120         if (oldpers->sync_request == NULL &&
4121             pers->sync_request != NULL) {
4122                 /* need to add the md_redundancy_group */
4123                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4124                         pr_warn("md: cannot register extra attributes for %s\n",
4125                                 mdname(mddev));
4126                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
4127                 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
4128                 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
4129         }
4130         if (oldpers->sync_request != NULL &&
4131             pers->sync_request == NULL) {
4132                 /* need to remove the md_redundancy_group */
4133                 if (mddev->to_remove == NULL)
4134                         mddev->to_remove = &md_redundancy_group;
4135         }
4136
4137         module_put(oldpers->owner);
4138
4139         rdev_for_each(rdev, mddev) {
4140                 if (rdev->raid_disk < 0)
4141                         continue;
4142                 if (rdev->new_raid_disk >= mddev->raid_disks)
4143                         rdev->new_raid_disk = -1;
4144                 if (rdev->new_raid_disk == rdev->raid_disk)
4145                         continue;
4146                 sysfs_unlink_rdev(mddev, rdev);
4147         }
4148         rdev_for_each(rdev, mddev) {
4149                 if (rdev->raid_disk < 0)
4150                         continue;
4151                 if (rdev->new_raid_disk == rdev->raid_disk)
4152                         continue;
4153                 rdev->raid_disk = rdev->new_raid_disk;
4154                 if (rdev->raid_disk < 0)
4155                         clear_bit(In_sync, &rdev->flags);
4156                 else {
4157                         if (sysfs_link_rdev(mddev, rdev))
4158                                 pr_warn("md: cannot register rd%d for %s after level change\n",
4159                                         rdev->raid_disk, mdname(mddev));
4160                 }
4161         }
4162
4163         if (pers->sync_request == NULL) {
4164                 /* this is now an array without redundancy, so
4165                  * it must always be in_sync
4166                  */
4167                 mddev->in_sync = 1;
4168                 del_timer_sync(&mddev->safemode_timer);
4169         }
4170         blk_set_stacking_limits(&mddev->queue->limits);
4171         pers->run(mddev);
4172         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4173         mddev_resume(mddev);
4174         if (!mddev->thread)
4175                 md_update_sb(mddev, 1);
4176         sysfs_notify_dirent_safe(mddev->sysfs_level);
4177         md_new_event(mddev);
4178         rv = len;
4179 out_unlock:
4180         mddev_unlock(mddev);
4181         return rv;
4182 }
4183
4184 static struct md_sysfs_entry md_level =
4185 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
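/*
 * Illustrative use (administrator-facing, not part of this file): an
 * online level change is requested by writing the new personality name
 * to the "level" attribute, e.g.
 *     echo raid5 > /sys/block/md0/md/level
 * level_store() auto-loads the target module via request_module() and
 * only proceeds if the current personality provides ->quiesce and the
 * new one provides ->takeover.
 */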
4186
4187 static ssize_t
4188 layout_show(struct mddev *mddev, char *page)
4189 {
4190         /* just a number, not meaningful for all levels */
4191         if (mddev->reshape_position != MaxSector &&
4192             mddev->layout != mddev->new_layout)
4193                 return sprintf(page, "%d (%d)\n",
4194                                mddev->new_layout, mddev->layout);
4195         return sprintf(page, "%d\n", mddev->layout);
4196 }
4197
4198 static ssize_t
4199 layout_store(struct mddev *mddev, const char *buf, size_t len)
4200 {
4201         unsigned int n;
4202         int err;
4203
4204         err = kstrtouint(buf, 10, &n);
4205         if (err < 0)
4206                 return err;
4207         err = mddev_lock(mddev);
4208         if (err)
4209                 return err;
4210
4211         if (mddev->pers) {
4212                 if (mddev->pers->check_reshape == NULL)
4213                         err = -EBUSY;
4214                 else if (mddev->ro)
4215                         err = -EROFS;
4216                 else {
4217                         mddev->new_layout = n;
4218                         err = mddev->pers->check_reshape(mddev);
4219                         if (err)
4220                                 mddev->new_layout = mddev->layout;
4221                 }
4222         } else {
4223                 mddev->new_layout = n;
4224                 if (mddev->reshape_position == MaxSector)
4225                         mddev->layout = n;
4226         }
4227         mddev_unlock(mddev);
4228         return err ?: len;
4229 }
4230 static struct md_sysfs_entry md_layout =
4231 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
4232
4233 static ssize_t
4234 raid_disks_show(struct mddev *mddev, char *page)
4235 {
4236         if (mddev->raid_disks == 0)
4237                 return 0;
4238         if (mddev->reshape_position != MaxSector &&
4239             mddev->delta_disks != 0)
4240                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4241                                mddev->raid_disks - mddev->delta_disks);
4242         return sprintf(page, "%d\n", mddev->raid_disks);
4243 }
4244
4245 static int update_raid_disks(struct mddev *mddev, int raid_disks);
4246
4247 static ssize_t
4248 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
4249 {
4250         unsigned int n;
4251         int err;
4252
4253         err = kstrtouint(buf, 10, &n);
4254         if (err < 0)
4255                 return err;
4256
4257         err = mddev_lock(mddev);
4258         if (err)
4259                 return err;
4260         if (mddev->pers)
4261                 err = update_raid_disks(mddev, n);
4262         else if (mddev->reshape_position != MaxSector) {
4263                 struct md_rdev *rdev;
4264                 int olddisks = mddev->raid_disks - mddev->delta_disks;
4265
4266                 err = -EINVAL;
4267                 rdev_for_each(rdev, mddev) {
4268                         if (olddisks < n &&
4269                             rdev->data_offset < rdev->new_data_offset)
4270                                 goto out_unlock;
4271                         if (olddisks > n &&
4272                             rdev->data_offset > rdev->new_data_offset)
4273                                 goto out_unlock;
4274                 }
4275                 err = 0;
4276                 mddev->delta_disks = n - olddisks;
4277                 mddev->raid_disks = n;
4278                 mddev->reshape_backwards = (mddev->delta_disks < 0);
4279         } else
4280                 mddev->raid_disks = n;
4281 out_unlock:
4282         mddev_unlock(mddev);
4283         return err ? err : len;
4284 }
4285 static struct md_sysfs_entry md_raid_disks =
4286 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
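/*
 * Illustrative use: writing a device count requests a change in the
 * number of raid disks, e.g.
 *     echo 4 > /sys/block/md0/md/raid_disks
 * On an active array this goes through update_raid_disks() and is
 * normally just one step of a reshape driven from user space (mdadm).
 */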
4287
4288 static ssize_t
4289 uuid_show(struct mddev *mddev, char *page)
4290 {
4291         return sprintf(page, "%pU\n", mddev->uuid);
4292 }
4293 static struct md_sysfs_entry md_uuid =
4294 __ATTR(uuid, S_IRUGO, uuid_show, NULL);
4295
4296 static ssize_t
4297 chunk_size_show(struct mddev *mddev, char *page)
4298 {
4299         if (mddev->reshape_position != MaxSector &&
4300             mddev->chunk_sectors != mddev->new_chunk_sectors)
4301                 return sprintf(page, "%d (%d)\n",
4302                                mddev->new_chunk_sectors << 9,
4303                                mddev->chunk_sectors << 9);
4304         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
4305 }
4306
4307 static ssize_t
4308 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
4309 {
4310         unsigned long n;
4311         int err;
4312
4313         err = kstrtoul(buf, 10, &n);
4314         if (err < 0)
4315                 return err;
4316
4317         err = mddev_lock(mddev);
4318         if (err)
4319                 return err;
4320         if (mddev->pers) {
4321                 if (mddev->pers->check_reshape == NULL)
4322                         err = -EBUSY;
4323                 else if (mddev->ro)
4324                         err = -EROFS;
4325                 else {
4326                         mddev->new_chunk_sectors = n >> 9;
4327                         err = mddev->pers->check_reshape(mddev);
4328                         if (err)
4329                                 mddev->new_chunk_sectors = mddev->chunk_sectors;
4330                 }
4331         } else {
4332                 mddev->new_chunk_sectors = n >> 9;
4333                 if (mddev->reshape_position == MaxSector)
4334                         mddev->chunk_sectors = n >> 9;
4335         }
4336         mddev_unlock(mddev);
4337         return err ?: len;
4338 }
4339 static struct md_sysfs_entry md_chunk_size =
4340 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
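/*
 * Note: the chunk_size attribute is expressed in bytes, while the mddev
 * fields hold 512-byte sectors, hence the <<9 / >>9 conversions above.
 * For example, writing 524288 (a 512 KiB chunk) stores
 * new_chunk_sectors = 1024.
 */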
4341
4342 static ssize_t
4343 resync_start_show(struct mddev *mddev, char *page)
4344 {
4345         if (mddev->recovery_cp == MaxSector)
4346                 return sprintf(page, "none\n");
4347         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
4348 }
4349
4350 static ssize_t
4351 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
4352 {
4353         unsigned long long n;
4354         int err;
4355
4356         if (cmd_match(buf, "none"))
4357                 n = MaxSector;
4358         else {
4359                 err = kstrtoull(buf, 10, &n);
4360                 if (err < 0)
4361                         return err;
4362                 if (n != (sector_t)n)
4363                         return -EINVAL;
4364         }
4365
4366         err = mddev_lock(mddev);
4367         if (err)
4368                 return err;
4369         if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
4370                 err = -EBUSY;
4371
4372         if (!err) {
4373                 mddev->recovery_cp = n;
4374                 if (mddev->pers)
4375                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
4376         }
4377         mddev_unlock(mddev);
4378         return err ?: len;
4379 }
4380 static struct md_sysfs_entry md_resync_start =
4381 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4382                 resync_start_show, resync_start_store);
4383
4384 /*
4385  * The array state can be:
4386  *
4387  * clear
4388  *     No devices, no size, no level
4389  *     Equivalent to STOP_ARRAY ioctl
4390  * inactive
4391  *     May have some settings, but array is not active
4392  *        all IO results in error
4393  *     When written, doesn't tear down array, but just stops it
4394  * suspended (not supported yet)
4395  *     All IO requests will block. The array can be reconfigured.
4396  *     Writing this, if accepted, will block until array is quiescent
4397  * readonly
4398  *     no resync can happen.  no superblocks get written.
4399  *     write requests fail
4400  * read-auto
4401  *     like readonly, but behaves like 'clean' on a write request.
4402  *
4403  * clean - no pending writes, but otherwise active.
4404  *     When written to inactive array, starts without resync
4405  *     If a write request arrives then
4406  *       if metadata is known, mark 'dirty' and switch to 'active'.
4407  *       if not known, block and switch to write-pending
4408  *     If written to an active array that has pending writes, then fails.
4409  * active
4410  *     fully active: IO and resync can be happening.
4411  *     When written to inactive array, starts with resync
4412  *
4413  * write-pending
4414  *     clean, but writes are blocked waiting for 'active' to be written.
4415  *
4416  * active-idle
4417  *     like active, but no writes have been seen for safemode_delay (200 msec by default).
4418  *
4419  * broken
4420  *     RAID0/LINEAR-only: same as clean, but array is missing a member.
4421  *     It's useful because mounted RAID0/LINEAR arrays aren't stopped
4422  *     when a member is gone, so this state will at least alert the
4423  *     user that something is wrong.
4424  */
4425 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
4426                    write_pending, active_idle, broken, bad_word};
4427 static char *array_states[] = {
4428         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
4429         "write-pending", "active-idle", "broken", NULL };
4430
4431 static int match_word(const char *word, char **list)
4432 {
4433         int n;
4434         for (n=0; list[n]; n++)
4435                 if (cmd_match(word, list[n]))
4436                         break;
4437         return n;
4438 }
4439
4440 static ssize_t
4441 array_state_show(struct mddev *mddev, char *page)
4442 {
4443         enum array_state st = inactive;
4444
4445         if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
4446                 switch(mddev->ro) {
4447                 case 1:
4448                         st = readonly;
4449                         break;
4450                 case 2:
4451                         st = read_auto;
4452                         break;
4453                 case 0:
4454                         spin_lock(&mddev->lock);
4455                         if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
4456                                 st = write_pending;
4457                         else if (mddev->in_sync)
4458                                 st = clean;
4459                         else if (mddev->safemode)
4460                                 st = active_idle;
4461                         else
4462                                 st = active;
4463                         spin_unlock(&mddev->lock);
4464                 }
4465
4466                 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4467                         st = broken;
4468         } else {
4469                 if (list_empty(&mddev->disks) &&
4470                     mddev->raid_disks == 0 &&
4471                     mddev->dev_sectors == 0)
4472                         st = clear;
4473                 else
4474                         st = inactive;
4475         }
4476         return sprintf(page, "%s\n", array_states[st]);
4477 }
4478
4479 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4480 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
4481 static int restart_array(struct mddev *mddev);
4482
4483 static ssize_t
4484 array_state_store(struct mddev *mddev, const char *buf, size_t len)
4485 {
4486         int err = 0;
4487         enum array_state st = match_word(buf, array_states);
4488
4489         if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
4490                 /* don't take reconfig_mutex when toggling between
4491                  * clean and active
4492                  */
4493                 spin_lock(&mddev->lock);
4494                 if (st == active) {
4495                         restart_array(mddev);
4496                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4497                         md_wakeup_thread(mddev->thread);
4498                         wake_up(&mddev->sb_wait);
4499                 } else /* st == clean */ {
4500                         restart_array(mddev);
4501                         if (!set_in_sync(mddev))
4502                                 err = -EBUSY;
4503                 }
4504                 if (!err)
4505                         sysfs_notify_dirent_safe(mddev->sysfs_state);
4506                 spin_unlock(&mddev->lock);
4507                 return err ?: len;
4508         }
4509         err = mddev_lock(mddev);
4510         if (err)
4511                 return err;
4512         err = -EINVAL;
4513         switch(st) {
4514         case bad_word:
4515                 break;
4516         case clear:
4517                 /* stopping an active array */
4518                 err = do_md_stop(mddev, 0, NULL);
4519                 break;
4520         case inactive:
4521                 /* stopping an active array */
4522                 if (mddev->pers)
4523                         err = do_md_stop(mddev, 2, NULL);
4524                 else
4525                         err = 0; /* already inactive */
4526                 break;
4527         case suspended:
4528                 break; /* not supported yet */
4529         case readonly:
4530                 if (mddev->pers)
4531                         err = md_set_readonly(mddev, NULL);
4532                 else {
4533                         mddev->ro = 1;
4534                         set_disk_ro(mddev->gendisk, 1);
4535                         err = do_md_run(mddev);
4536                 }
4537                 break;
4538         case read_auto:
4539                 if (mddev->pers) {
4540                         if (mddev->ro == 0)
4541                                 err = md_set_readonly(mddev, NULL);
4542                         else if (mddev->ro == 1)
4543                                 err = restart_array(mddev);
4544                         if (err == 0) {
4545                                 mddev->ro = 2;
4546                                 set_disk_ro(mddev->gendisk, 0);
4547                         }
4548                 } else {
4549                         mddev->ro = 2;
4550                         err = do_md_run(mddev);
4551                 }
4552                 break;
4553         case clean:
4554                 if (mddev->pers) {
4555                         err = restart_array(mddev);
4556                         if (err)
4557                                 break;
4558                         spin_lock(&mddev->lock);
4559                         if (!set_in_sync(mddev))
4560                                 err = -EBUSY;
4561                         spin_unlock(&mddev->lock);
4562                 } else
4563                         err = -EINVAL;
4564                 break;
4565         case active:
4566                 if (mddev->pers) {
4567                         err = restart_array(mddev);
4568                         if (err)
4569                                 break;
4570                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4571                         wake_up(&mddev->sb_wait);
4572                         err = 0;
4573                 } else {
4574                         mddev->ro = 0;
4575                         set_disk_ro(mddev->gendisk, 0);
4576                         err = do_md_run(mddev);
4577                 }
4578                 break;
4579         case write_pending:
4580         case active_idle:
4581         case broken:
4582                 /* these cannot be set */
4583                 break;
4584         }
4585
4586         if (!err) {
4587                 if (mddev->hold_active == UNTIL_IOCTL)
4588                         mddev->hold_active = 0;
4589                 sysfs_notify_dirent_safe(mddev->sysfs_state);
4590         }
4591         mddev_unlock(mddev);
4592         return err ?: len;
4593 }
4594 static struct md_sysfs_entry md_array_state =
4595 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
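/*
 * Illustrative use: the states documented above are driven from user
 * space, e.g.
 *     echo readonly > /sys/block/md0/md/array_state   # fail further writes
 *     echo active   > /sys/block/md0/md/array_state   # resume normal IO
 * write-pending, active-idle and broken are report-only; writing them
 * is rejected by array_state_store().
 */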
4596
4597 static ssize_t
4598 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
4599         return sprintf(page, "%d\n",
4600                        atomic_read(&mddev->max_corr_read_errors));
4601 }
4602
4603 static ssize_t
4604 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4605 {
4606         unsigned int n;
4607         int rv;
4608
4609         rv = kstrtouint(buf, 10, &n);
4610         if (rv < 0)
4611                 return rv;
4612         if (n > INT_MAX)
4613                 return -EINVAL;
4614         atomic_set(&mddev->max_corr_read_errors, n);
4615         return len;
4616 }
4617
4618 static struct md_sysfs_entry max_corr_read_errors =
4619 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4620         max_corrected_read_errors_store);
4621
4622 static ssize_t
4623 null_show(struct mddev *mddev, char *page)
4624 {
4625         return -EINVAL;
4626 }
4627
4628 /* need to ensure rdev_delayed_delete() has completed */
4629 static void flush_rdev_wq(struct mddev *mddev)
4630 {
4631         struct md_rdev *rdev;
4632
4633         rcu_read_lock();
4634         rdev_for_each_rcu(rdev, mddev)
4635                 if (work_pending(&rdev->del_work)) {
4636                         flush_workqueue(md_rdev_misc_wq);
4637                         break;
4638                 }
4639         rcu_read_unlock();
4640 }
4641
4642 static ssize_t
4643 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4644 {
4645         /* buf must be "%d:%d", optionally newline-terminated, giving major and minor numbers */
4646         /* The new device is added to the array.
4647          * If the array has a persistent superblock, we read the
4648          * superblock to initialise info and check validity.
4649          * Otherwise, the only checking done is that in bind_rdev_to_array,
4650          * which mainly checks size.
4651          */
4652         char *e;
4653         int major = simple_strtoul(buf, &e, 10);
4654         int minor;
4655         dev_t dev;
4656         struct md_rdev *rdev;
4657         int err;
4658
4659         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4660                 return -EINVAL;
4661         minor = simple_strtoul(e+1, &e, 10);
4662         if (*e && *e != '\n')
4663                 return -EINVAL;
4664         dev = MKDEV(major, minor);
4665         if (major != MAJOR(dev) ||
4666             minor != MINOR(dev))
4667                 return -EOVERFLOW;
4668
4669         flush_rdev_wq(mddev);
4670         err = mddev_lock(mddev);
4671         if (err)
4672                 return err;
4673         if (mddev->persistent) {
4674                 rdev = md_import_device(dev, mddev->major_version,
4675                                         mddev->minor_version);
4676                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4677                         struct md_rdev *rdev0
4678                                 = list_entry(mddev->disks.next,
4679                                              struct md_rdev, same_set);
4680                         err = super_types[mddev->major_version]
4681                                 .load_super(rdev, rdev0, mddev->minor_version);
4682                         if (err < 0)
4683                                 goto out;
4684                 }
4685         } else if (mddev->external)
4686                 rdev = md_import_device(dev, -2, -1);
4687         else
4688                 rdev = md_import_device(dev, -1, -1);
4689
4690         if (IS_ERR(rdev)) {
4691                 mddev_unlock(mddev);
4692                 return PTR_ERR(rdev);
4693         }
4694         err = bind_rdev_to_array(rdev, mddev);
4695  out:
4696         if (err)
4697                 export_rdev(rdev);
4698         mddev_unlock(mddev);
4699         if (!err)
4700                 md_new_event(mddev);
4701         return err ? err : len;
4702 }
4703
4704 static struct md_sysfs_entry md_new_device =
4705 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
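/*
 * Illustrative use: new_dev takes the "major:minor" numbers of the
 * device to add, e.g. (assuming 8:16 is the intended disk)
 *     echo 8:16 > /sys/block/md0/md/new_dev
 * The device is then imported, validated against the existing
 * superblocks and bound to the array as described in new_dev_store().
 */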
4706
4707 static ssize_t
4708 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4709 {
4710         char *end;
4711         unsigned long chunk, end_chunk;
4712         int err;
4713
4714         err = mddev_lock(mddev);
4715         if (err)
4716                 return err;
4717         if (!mddev->bitmap)
4718                 goto out;
4719         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4720         while (*buf) {
4721                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4722                 if (buf == end) break;
4723                 if (*end == '-') { /* range */
4724                         buf = end + 1;
4725                         end_chunk = simple_strtoul(buf, &end, 0);
4726                         if (buf == end) break;
4727                 }
4728                 if (*end && !isspace(*end)) break;
4729                 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4730                 buf = skip_spaces(end);
4731         }
4732         md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4733 out:
4734         mddev_unlock(mddev);
4735         return len;
4736 }
4737
4738 static struct md_sysfs_entry md_bitmap =
4739 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
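/*
 * Illustrative use: bitmap_set_bits accepts bitmap chunk numbers or
 * ranges, e.g.
 *     echo "100-200" > /sys/block/md0/md/bitmap_set_bits
 * which marks those chunks dirty so the corresponding regions are
 * treated as needing resync.
 */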
4740
4741 static ssize_t
4742 size_show(struct mddev *mddev, char *page)
4743 {
4744         return sprintf(page, "%llu\n",
4745                 (unsigned long long)mddev->dev_sectors / 2);
4746 }
4747
4748 static int update_size(struct mddev *mddev, sector_t num_sectors);
4749
4750 static ssize_t
4751 size_store(struct mddev *mddev, const char *buf, size_t len)
4752 {
4753         /* If array is inactive, we can reduce the component size, but
4754          * not increase it (except from 0).
4755          * If array is active, we can try an on-line resize
4756          */
4757         sector_t sectors;
4758         int err = strict_blocks_to_sectors(buf, &sectors);
4759
4760         if (err < 0)
4761                 return err;
4762         err = mddev_lock(mddev);
4763         if (err)
4764                 return err;
4765         if (mddev->pers) {
4766                 err = update_size(mddev, sectors);
4767                 if (err == 0)
4768                         md_update_sb(mddev, 1);
4769         } else {
4770                 if (mddev->dev_sectors == 0 ||
4771                     mddev->dev_sectors > sectors)
4772                         mddev->dev_sectors = sectors;
4773                 else
4774                         err = -ENOSPC;
4775         }
4776         mddev_unlock(mddev);
4777         return err ? err : len;
4778 }
4779
4780 static struct md_sysfs_entry md_size =
4781 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
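/*
 * Note: component_size is expressed in 1 KiB blocks (dev_sectors / 2,
 * since dev_sectors counts 512-byte sectors); writes are converted the
 * same way by strict_blocks_to_sectors().
 */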
4782
4783 /* Metadata version.
4784  * This is one of
4785  *   'none' for arrays with no metadata (good luck...)
4786  *   'external' for arrays with externally managed metadata,
4787  * or N.M for internally known formats
4788  */
4789 static ssize_t
4790 metadata_show(struct mddev *mddev, char *page)
4791 {
4792         if (mddev->persistent)
4793                 return sprintf(page, "%d.%d\n",
4794                                mddev->major_version, mddev->minor_version);
4795         else if (mddev->external)
4796                 return sprintf(page, "external:%s\n", mddev->metadata_type);
4797         else
4798                 return sprintf(page, "none\n");
4799 }
4800
4801 static ssize_t
4802 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4803 {
4804         int major, minor;
4805         char *e;
4806         int err;
4807         /* Changing the details of 'external' metadata is
4808          * always permitted.  Otherwise there must be
4809          * no devices attached to the array.
4810          */
4811
4812         err = mddev_lock(mddev);
4813         if (err)
4814                 return err;
4815         err = -EBUSY;
4816         if (mddev->external && strncmp(buf, "external:", 9) == 0)
4817                 ;
4818         else if (!list_empty(&mddev->disks))
4819                 goto out_unlock;
4820
4821         err = 0;
4822         if (cmd_match(buf, "none")) {
4823                 mddev->persistent = 0;
4824                 mddev->external = 0;
4825                 mddev->major_version = 0;
4826                 mddev->minor_version = 90;
4827                 goto out_unlock;
4828         }
4829         if (strncmp(buf, "external:", 9) == 0) {
4830                 size_t namelen = len-9;
4831                 if (namelen >= sizeof(mddev->metadata_type))
4832                         namelen = sizeof(mddev->metadata_type)-1;
4833                 strncpy(mddev->metadata_type, buf+9, namelen);
4834                 mddev->metadata_type[namelen] = 0;
4835                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4836                         mddev->metadata_type[--namelen] = 0;
4837                 mddev->persistent = 0;
4838                 mddev->external = 1;
4839                 mddev->major_version = 0;
4840                 mddev->minor_version = 90;
4841                 goto out_unlock;
4842         }
4843         major = simple_strtoul(buf, &e, 10);
4844         err = -EINVAL;
4845         if (e==buf || *e != '.')
4846                 goto out_unlock;
4847         buf = e+1;
4848         minor = simple_strtoul(buf, &e, 10);
4849         if (e==buf || (*e && *e != '\n') )
4850                 goto out_unlock;
4851         err = -ENOENT;
4852         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4853                 goto out_unlock;
4854         mddev->major_version = major;
4855         mddev->minor_version = minor;
4856         mddev->persistent = 1;
4857         mddev->external = 0;
4858         err = 0;
4859 out_unlock:
4860         mddev_unlock(mddev);
4861         return err ?: len;
4862 }
4863
4864 static struct md_sysfs_entry md_metadata =
4865 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
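/*
 * Illustrative use: while no devices are attached, the metadata format
 * can be selected, e.g.
 *     echo 1.2 > /sys/block/md0/md/metadata_version
 *     echo external:some-handler > /sys/block/md0/md/metadata_version
 * The text after "external:" is an arbitrary name that is simply
 * recorded in metadata_type for the external manager to interpret.
 */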
4866
4867 static ssize_t
4868 action_show(struct mddev *mddev, char *page)
4869 {
4870         char *type = "idle";
4871         unsigned long recovery = mddev->recovery;
4872         if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4873                 type = "frozen";
4874         else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4875             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4876                 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4877                         type = "reshape";
4878                 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4879                         if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4880                                 type = "resync";
4881                         else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4882                                 type = "check";
4883                         else
4884                                 type = "repair";
4885                 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4886                         type = "recover";
4887                 else if (mddev->reshape_position != MaxSector)
4888                         type = "reshape";
4889         }
4890         return sprintf(page, "%s\n", type);
4891 }
4892
4893 static ssize_t
4894 action_store(struct mddev *mddev, const char *page, size_t len)
4895 {
4896         if (!mddev->pers || !mddev->pers->sync_request)
4897                 return -EINVAL;
4898
4899
4900         if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4901                 if (cmd_match(page, "frozen"))
4902                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4903                 else
4904                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4905                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4906                     mddev_lock(mddev) == 0) {
4907                         if (work_pending(&mddev->del_work))
4908                                 flush_workqueue(md_misc_wq);
4909                         if (mddev->sync_thread) {
4910                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4911                                 md_reap_sync_thread(mddev);
4912                         }
4913                         mddev_unlock(mddev);
4914                 }
4915         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4916                 return -EBUSY;
4917         else if (cmd_match(page, "resync"))
4918                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4919         else if (cmd_match(page, "recover")) {
4920                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4921                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4922         } else if (cmd_match(page, "reshape")) {
4923                 int err;
4924                 if (mddev->pers->start_reshape == NULL)
4925                         return -EINVAL;
4926                 err = mddev_lock(mddev);
4927                 if (!err) {
4928                         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
4929                                 err =  -EBUSY;
4930                         } else if (mddev->reshape_position == MaxSector ||
4931                                    mddev->pers->check_reshape == NULL ||
4932                                    mddev->pers->check_reshape(mddev)) {
4933                                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4934                                 err = mddev->pers->start_reshape(mddev);
4935                         } else {
4936                                 /*
4937                                  * If reshape is still in progress, and
4938                                  * md_check_recovery() can continue to reshape,
4939                                  * don't restart reshape because data can be
4940                                  * corrupted for raid456.
4941                                  */
4942                                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4943                         }
4944                         mddev_unlock(mddev);
4945                 }
4946                 if (err)
4947                         return err;
4948                 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
4949         } else {
4950                 if (cmd_match(page, "check"))
4951                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4952                 else if (!cmd_match(page, "repair"))
4953                         return -EINVAL;
4954                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4955                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4956                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4957         }
4958         if (mddev->ro == 2) {
4959                 /* A write to sync_action is enough to justify
4960                  * canceling read-auto mode
4961                  */
4962                 mddev->ro = 0;
4963                 md_wakeup_thread(mddev->sync_thread);
4964         }
4965         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4966         md_wakeup_thread(mddev->thread);
4967         sysfs_notify_dirent_safe(mddev->sysfs_action);
4968         return len;
4969 }
4970
4971 static struct md_sysfs_entry md_scan_mode =
4972 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
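/*
 * Illustrative use: array maintenance is typically triggered through
 * this attribute, e.g.
 *     echo check  > /sys/block/md0/md/sync_action   # read-only scrub
 *     echo repair > /sys/block/md0/md/sync_action   # scrub and correct
 *     echo idle   > /sys/block/md0/md/sync_action   # interrupt it
 * "frozen" sets MD_RECOVERY_FROZEN so that nothing new is started.
 */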
4973
4974 static ssize_t
4975 last_sync_action_show(struct mddev *mddev, char *page)
4976 {
4977         return sprintf(page, "%s\n", mddev->last_sync_action);
4978 }
4979
4980 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4981
4982 static ssize_t
4983 mismatch_cnt_show(struct mddev *mddev, char *page)
4984 {
4985         return sprintf(page, "%llu\n",
4986                        (unsigned long long)
4987                        atomic64_read(&mddev->resync_mismatches));
4988 }
4989
4990 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4991
4992 static ssize_t
4993 sync_min_show(struct mddev *mddev, char *page)
4994 {
4995         return sprintf(page, "%d (%s)\n", speed_min(mddev),
4996                        mddev->sync_speed_min ? "local": "system");
4997 }
4998
4999 static ssize_t
5000 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
5001 {
5002         unsigned int min;
5003         int rv;
5004
5005         if (strncmp(buf, "system", 6)==0) {
5006                 min = 0;
5007         } else {
5008                 rv = kstrtouint(buf, 10, &min);
5009                 if (rv < 0)
5010                         return rv;
5011                 if (min == 0)
5012                         return -EINVAL;
5013         }
5014         mddev->sync_speed_min = min;
5015         return len;
5016 }
5017
5018 static struct md_sysfs_entry md_sync_min =
5019 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
5020
5021 static ssize_t
5022 sync_max_show(struct mddev *mddev, char *page)
5023 {
5024         return sprintf(page, "%d (%s)\n", speed_max(mddev),
5025                        mddev->sync_speed_max ? "local": "system");
5026 }
5027
5028 static ssize_t
5029 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
5030 {
5031         unsigned int max;
5032         int rv;
5033
5034         if (strncmp(buf, "system", 6)==0) {
5035                 max = 0;
5036         } else {
5037                 rv = kstrtouint(buf, 10, &max);
5038                 if (rv < 0)
5039                         return rv;
5040                 if (max == 0)
5041                         return -EINVAL;
5042         }
5043         mddev->sync_speed_max = max;
5044         return len;
5045 }
5046
5047 static struct md_sysfs_entry md_sync_max =
5048 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
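/*
 * Illustrative use: sync_speed_min and sync_speed_max are per-array
 * resync rate limits in KiB/sec; writing "system" reverts to the
 * system-wide dev.raid.speed_limit_{min,max} sysctl defaults, e.g.
 *     echo 50000  > /sys/block/md0/md/sync_speed_max
 *     echo system > /sys/block/md0/md/sync_speed_min
 */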
5049
5050 static ssize_t
5051 degraded_show(struct mddev *mddev, char *page)
5052 {
5053         return sprintf(page, "%d\n", mddev->degraded);
5054 }
5055 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
5056
5057 static ssize_t
5058 sync_force_parallel_show(struct mddev *mddev, char *page)
5059 {
5060         return sprintf(page, "%d\n", mddev->parallel_resync);
5061 }
5062
5063 static ssize_t
5064 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
5065 {
5066         long n;
5067
5068         if (kstrtol(buf, 10, &n))
5069                 return -EINVAL;
5070
5071         if (n != 0 && n != 1)
5072                 return -EINVAL;
5073
5074         mddev->parallel_resync = n;
5075
5076         if (mddev->sync_thread)
5077                 wake_up(&resync_wait);
5078
5079         return len;
5080 }
5081
5082 /* force parallel resync, even with shared block devices */
5083 static struct md_sysfs_entry md_sync_force_parallel =
5084 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
5085        sync_force_parallel_show, sync_force_parallel_store);
5086
5087 static ssize_t
5088 sync_speed_show(struct mddev *mddev, char *page)
5089 {
5090         unsigned long resync, dt, db;
5091         if (mddev->curr_resync == 0)
5092                 return sprintf(page, "none\n");
5093         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
5094         dt = (jiffies - mddev->resync_mark) / HZ;
5095         if (!dt) dt++;
5096         db = resync - mddev->resync_mark_cnt;
5097         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
5098 }
5099
5100 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
5101
5102 static ssize_t
5103 sync_completed_show(struct mddev *mddev, char *page)
5104 {
5105         unsigned long long max_sectors, resync;
5106
5107         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5108                 return sprintf(page, "none\n");
5109
5110         if (mddev->curr_resync == 1 ||
5111             mddev->curr_resync == 2)
5112                 return sprintf(page, "delayed\n");
5113
5114         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
5115             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5116                 max_sectors = mddev->resync_max_sectors;
5117         else
5118                 max_sectors = mddev->dev_sectors;
5119
5120         resync = mddev->curr_resync_completed;
5121         return sprintf(page, "%llu / %llu\n", resync, max_sectors);
5122 }
5123
5124 static struct md_sysfs_entry md_sync_completed =
5125         __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
5126
5127 static ssize_t
5128 min_sync_show(struct mddev *mddev, char *page)
5129 {
5130         return sprintf(page, "%llu\n",
5131                        (unsigned long long)mddev->resync_min);
5132 }
5133 static ssize_t
5134 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
5135 {
5136         unsigned long long min;
5137         int err;
5138
5139         if (kstrtoull(buf, 10, &min))
5140                 return -EINVAL;
5141
5142         spin_lock(&mddev->lock);
5143         err = -EINVAL;
5144         if (min > mddev->resync_max)
5145                 goto out_unlock;
5146
5147         err = -EBUSY;
5148         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5149                 goto out_unlock;
5150
5151         /* Round down to multiple of 4K for safety */
5152         mddev->resync_min = round_down(min, 8);
5153         err = 0;
5154
5155 out_unlock:
5156         spin_unlock(&mddev->lock);
5157         return err ?: len;
5158 }
5159
5160 static struct md_sysfs_entry md_min_sync =
5161 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
5162
5163 static ssize_t
5164 max_sync_show(struct mddev *mddev, char *page)
5165 {
5166         if (mddev->resync_max == MaxSector)
5167                 return sprintf(page, "max\n");
5168         else
5169                 return sprintf(page, "%llu\n",
5170                                (unsigned long long)mddev->resync_max);
5171 }
5172 static ssize_t
5173 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
5174 {
5175         int err;
5176         spin_lock(&mddev->lock);
5177         if (strncmp(buf, "max", 3) == 0)
5178                 mddev->resync_max = MaxSector;
5179         else {
5180                 unsigned long long max;
5181                 int chunk;
5182
5183                 err = -EINVAL;
5184                 if (kstrtoull(buf, 10, &max))
5185                         goto out_unlock;
5186                 if (max < mddev->resync_min)
5187                         goto out_unlock;
5188
5189                 err = -EBUSY;
5190                 if (max < mddev->resync_max &&
5191                     mddev->ro == 0 &&
5192                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5193                         goto out_unlock;
5194
5195                 /* Must be a multiple of chunk_size */
5196                 chunk = mddev->chunk_sectors;
5197                 if (chunk) {
5198                         sector_t temp = max;
5199
5200                         err = -EINVAL;
5201                         if (sector_div(temp, chunk))
5202                                 goto out_unlock;
5203                 }
5204                 mddev->resync_max = max;
5205         }
5206         wake_up(&mddev->recovery_wait);
5207         err = 0;
5208 out_unlock:
5209         spin_unlock(&mddev->lock);
5210         return err ?: len;
5211 }
5212
5213 static struct md_sysfs_entry md_max_sync =
5214 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
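/*
 * Note: sync_min and sync_max bound the resync window in 512-byte
 * sectors.  sync_min is rounded down to a multiple of 8 sectors (4K),
 * and sync_max must be "max" or a multiple of the chunk size (when one
 * is set), as enforced by the two store routines above.
 */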
5215
5216 static ssize_t
5217 suspend_lo_show(struct mddev *mddev, char *page)
5218 {
5219         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
5220 }
5221
5222 static ssize_t
5223 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
5224 {
5225         unsigned long long new;
5226         int err;
5227
5228         err = kstrtoull(buf, 10, &new);
5229         if (err < 0)
5230                 return err;
5231         if (new != (sector_t)new)
5232                 return -EINVAL;
5233
5234         err = mddev_lock(mddev);
5235         if (err)
5236                 return err;
5237         err = -EINVAL;
5238         if (mddev->pers == NULL ||
5239             mddev->pers->quiesce == NULL)
5240                 goto unlock;
5241         mddev_suspend(mddev);
5242         mddev->suspend_lo = new;
5243         mddev_resume(mddev);
5244
5245         err = 0;
5246 unlock:
5247         mddev_unlock(mddev);
5248         return err ?: len;
5249 }
5250 static struct md_sysfs_entry md_suspend_lo =
5251 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5252
5253 static ssize_t
5254 suspend_hi_show(struct mddev *mddev, char *page)
5255 {
5256         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
5257 }
5258
5259 static ssize_t
5260 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
5261 {
5262         unsigned long long new;
5263         int err;
5264
5265         err = kstrtoull(buf, 10, &new);
5266         if (err < 0)
5267                 return err;
5268         if (new != (sector_t)new)
5269                 return -EINVAL;
5270
5271         err = mddev_lock(mddev);
5272         if (err)
5273                 return err;
5274         err = -EINVAL;
5275         if (mddev->pers == NULL)
5276                 goto unlock;
5277
5278         mddev_suspend(mddev);
5279         mddev->suspend_hi = new;
5280         mddev_resume(mddev);
5281
5282         err = 0;
5283 unlock:
5284         mddev_unlock(mddev);
5285         return err ?: len;
5286 }
5287 static struct md_sysfs_entry md_suspend_hi =
5288 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
5289
5290 static ssize_t
5291 reshape_position_show(struct mddev *mddev, char *page)
5292 {
5293         if (mddev->reshape_position != MaxSector)
5294                 return sprintf(page, "%llu\n",
5295                                (unsigned long long)mddev->reshape_position);
5296         strcpy(page, "none\n");
5297         return 5;
5298 }
5299
5300 static ssize_t
5301 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
5302 {
5303         struct md_rdev *rdev;
5304         unsigned long long new;
5305         int err;
5306
5307         err = kstrtoull(buf, 10, &new);
5308         if (err < 0)
5309                 return err;
5310         if (new != (sector_t)new)
5311                 return -EINVAL;
5312         err = mddev_lock(mddev);
5313         if (err)
5314                 return err;
5315         err = -EBUSY;
5316         if (mddev->pers)
5317                 goto unlock;
5318         mddev->reshape_position = new;
5319         mddev->delta_disks = 0;
5320         mddev->reshape_backwards = 0;
5321         mddev->new_level = mddev->level;
5322         mddev->new_layout = mddev->layout;
5323         mddev->new_chunk_sectors = mddev->chunk_sectors;
5324         rdev_for_each(rdev, mddev)
5325                 rdev->new_data_offset = rdev->data_offset;
5326         err = 0;
5327 unlock:
5328         mddev_unlock(mddev);
5329         return err ?: len;
5330 }
5331
5332 static struct md_sysfs_entry md_reshape_position =
5333 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5334        reshape_position_store);
5335
5336 static ssize_t
5337 reshape_direction_show(struct mddev *mddev, char *page)
5338 {
5339         return sprintf(page, "%s\n",
5340                        mddev->reshape_backwards ? "backwards" : "forwards");
5341 }
5342
5343 static ssize_t
5344 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5345 {
5346         int backwards = 0;
5347         int err;
5348
5349         if (cmd_match(buf, "forwards"))
5350                 backwards = 0;
5351         else if (cmd_match(buf, "backwards"))
5352                 backwards = 1;
5353         else
5354                 return -EINVAL;
5355         if (mddev->reshape_backwards == backwards)
5356                 return len;
5357
5358         err = mddev_lock(mddev);
5359         if (err)
5360                 return err;
5361         /* check if we are allowed to change */
5362         if (mddev->delta_disks)
5363                 err = -EBUSY;
5364         else if (mddev->persistent &&
5365             mddev->major_version == 0)
5366                 err =  -EINVAL;
5367         else
5368                 mddev->reshape_backwards = backwards;
5369         mddev_unlock(mddev);
5370         return err ?: len;
5371 }
5372
5373 static struct md_sysfs_entry md_reshape_direction =
5374 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5375        reshape_direction_store);
5376
5377 static ssize_t
5378 array_size_show(struct mddev *mddev, char *page)
5379 {
5380         if (mddev->external_size)
5381                 return sprintf(page, "%llu\n",
5382                                (unsigned long long)mddev->array_sectors/2);
5383         else
5384                 return sprintf(page, "default\n");
5385 }
5386
5387 static ssize_t
5388 array_size_store(struct mddev *mddev, const char *buf, size_t len)
5389 {
5390         sector_t sectors;
5391         int err;
5392
5393         err = mddev_lock(mddev);
5394         if (err)
5395                 return err;
5396
5397         /* cluster raid doesn't support changing array_sectors */
5398         if (mddev_is_clustered(mddev)) {
5399                 mddev_unlock(mddev);
5400                 return -EINVAL;
5401         }
5402
5403         if (strncmp(buf, "default", 7) == 0) {
5404                 if (mddev->pers)
5405                         sectors = mddev->pers->size(mddev, 0, 0);
5406                 else
5407                         sectors = mddev->array_sectors;
5408
5409                 mddev->external_size = 0;
5410         } else {
5411                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
5412                         err = -EINVAL;
5413                 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5414                         err = -E2BIG;
5415                 else
5416                         mddev->external_size = 1;
5417         }
5418
5419         if (!err) {
5420                 mddev->array_sectors = sectors;
5421                 if (mddev->pers) {
5422                         set_capacity(mddev->gendisk, mddev->array_sectors);
5423                         revalidate_disk_size(mddev->gendisk, true);
5424                 }
5425         }
5426         mddev_unlock(mddev);
5427         return err ?: len;
5428 }
5429
5430 static struct md_sysfs_entry md_array_size =
5431 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5432        array_size_store);
5433
5434 static ssize_t
5435 consistency_policy_show(struct mddev *mddev, char *page)
5436 {
5437         int ret;
5438
5439         if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5440                 ret = sprintf(page, "journal\n");
5441         } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5442                 ret = sprintf(page, "ppl\n");
5443         } else if (mddev->bitmap) {
5444                 ret = sprintf(page, "bitmap\n");
5445         } else if (mddev->pers) {
5446                 if (mddev->pers->sync_request)
5447                         ret = sprintf(page, "resync\n");
5448                 else
5449                         ret = sprintf(page, "none\n");
5450         } else {
5451                 ret = sprintf(page, "unknown\n");
5452         }
5453
5454         return ret;
5455 }
5456
5457 static ssize_t
5458 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5459 {
5460         int err = 0;
5461
5462         if (mddev->pers) {
5463                 if (mddev->pers->change_consistency_policy)
5464                         err = mddev->pers->change_consistency_policy(mddev, buf);
5465                 else
5466                         err = -EBUSY;
5467         } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5468                 set_bit(MD_HAS_PPL, &mddev->flags);
5469         } else {
5470                 err = -EINVAL;
5471         }
5472
5473         return err ? err : len;
5474 }
5475
5476 static struct md_sysfs_entry md_consistency_policy =
5477 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5478        consistency_policy_store);
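/*
 * Illustrative use: on an active array whose personality implements
 * ->change_consistency_policy (raid456 does), the policy can be
 * switched at run time, e.g.
 *     echo ppl > /sys/block/md0/md/consistency_policy
 * For an inactive array with external metadata the same write simply
 * sets MD_HAS_PPL, as handled above.
 */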
5479
5480 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5481 {
5482         return sprintf(page, "%d\n", mddev->fail_last_dev);
5483 }
5484
5485 /*
5486  * Setting fail_last_dev to true allows the last device to be forcibly
5487  * removed from RAID1/RAID10.
5488  */
5489 static ssize_t
5490 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5491 {
5492         int ret;
5493         bool value;
5494
5495         ret = kstrtobool(buf, &value);
5496         if (ret)
5497                 return ret;
5498
5499         if (value != mddev->fail_last_dev)
5500                 mddev->fail_last_dev = value;
5501
5502         return len;
5503 }
5504 static struct md_sysfs_entry md_fail_last_dev =
5505 __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5506        fail_last_dev_store);
5507
5508 static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
5509 {
5510         if (mddev->pers == NULL || (mddev->pers->level != 1))
5511                 return sprintf(page, "n/a\n");
5512         else
5513                 return sprintf(page, "%d\n", mddev->serialize_policy);
5514 }
5515
5516 /*
5517  * Setting serialize_policy to true enforces that write IO is not
5518  * reordered for raid1.
5519  */
5520 static ssize_t
5521 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
5522 {
5523         int err;
5524         bool value;
5525
5526         err = kstrtobool(buf, &value);
5527         if (err)
5528                 return err;
5529
5530         if (value == mddev->serialize_policy)
5531                 return len;
5532
5533         err = mddev_lock(mddev);
5534         if (err)
5535                 return err;
5536         if (mddev->pers == NULL || (mddev->pers->level != 1)) {
5537                 pr_err("md: serialize_policy is only effective for raid1\n");
5538                 err = -EINVAL;
5539                 goto unlock;
5540         }
5541
5542         mddev_suspend(mddev);
5543         if (value)
5544                 mddev_create_serial_pool(mddev, NULL, true);
5545         else
5546                 mddev_destroy_serial_pool(mddev, NULL, true);
5547         mddev->serialize_policy = value;
5548         mddev_resume(mddev);
5549 unlock:
5550         mddev_unlock(mddev);
5551         return err ?: len;
5552 }
5553
5554 static struct md_sysfs_entry md_serialize_policy =
5555 __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
5556        serialize_policy_store);
5557
5558
5559 static struct attribute *md_default_attrs[] = {
5560         &md_level.attr,
5561         &md_layout.attr,
5562         &md_raid_disks.attr,
5563         &md_uuid.attr,
5564         &md_chunk_size.attr,
5565         &md_size.attr,
5566         &md_resync_start.attr,
5567         &md_metadata.attr,
5568         &md_new_device.attr,
5569         &md_safe_delay.attr,
5570         &md_array_state.attr,
5571         &md_reshape_position.attr,
5572         &md_reshape_direction.attr,
5573         &md_array_size.attr,
5574         &max_corr_read_errors.attr,
5575         &md_consistency_policy.attr,
5576         &md_fail_last_dev.attr,
5577         &md_serialize_policy.attr,
5578         NULL,
5579 };
5580
5581 static struct attribute *md_redundancy_attrs[] = {
5582         &md_scan_mode.attr,
5583         &md_last_scan_mode.attr,
5584         &md_mismatches.attr,
5585         &md_sync_min.attr,
5586         &md_sync_max.attr,
5587         &md_sync_speed.attr,
5588         &md_sync_force_parallel.attr,
5589         &md_sync_completed.attr,
5590         &md_min_sync.attr,
5591         &md_max_sync.attr,
5592         &md_suspend_lo.attr,
5593         &md_suspend_hi.attr,
5594         &md_bitmap.attr,
5595         &md_degraded.attr,
5596         NULL,
5597 };
5598 static struct attribute_group md_redundancy_group = {
5599         .name = NULL,
5600         .attrs = md_redundancy_attrs,
5601 };
5602
5603 static ssize_t
5604 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5605 {
5606         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5607         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5608         ssize_t rv;
5609
5610         if (!entry->show)
5611                 return -EIO;
5612         spin_lock(&all_mddevs_lock);
5613         if (list_empty(&mddev->all_mddevs)) {
5614                 spin_unlock(&all_mddevs_lock);
5615                 return -EBUSY;
5616         }
5617         mddev_get(mddev);
5618         spin_unlock(&all_mddevs_lock);
5619
5620         rv = entry->show(mddev, page);
5621         mddev_put(mddev);
5622         return rv;
5623 }
5624
5625 static ssize_t
5626 md_attr_store(struct kobject *kobj, struct attribute *attr,
5627               const char *page, size_t length)
5628 {
5629         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5630         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5631         ssize_t rv;
5632
5633         if (!entry->store)
5634                 return -EIO;
5635         if (!capable(CAP_SYS_ADMIN))
5636                 return -EACCES;
5637         spin_lock(&all_mddevs_lock);
5638         if (list_empty(&mddev->all_mddevs)) {
5639                 spin_unlock(&all_mddevs_lock);
5640                 return -EBUSY;
5641         }
5642         mddev_get(mddev);
5643         spin_unlock(&all_mddevs_lock);
5644         rv = entry->store(mddev, page, length);
5645         mddev_put(mddev);
5646         return rv;
5647 }
5648
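/*
 * kobject ->release callback for the mddev kobject (wired up via md_ktype
 * below); runs once the last kobject reference is dropped and tears down
 * the gendisk, request queue, biosets and the mddev itself.
 */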
5649 static void md_free(struct kobject *ko)
5650 {
5651         struct mddev *mddev = container_of(ko, struct mddev, kobj);
5652
5653         if (mddev->sysfs_state)
5654                 sysfs_put(mddev->sysfs_state);
5655         if (mddev->sysfs_level)
5656                 sysfs_put(mddev->sysfs_level);
5657
5658         if (mddev->gendisk)
5659                 del_gendisk(mddev->gendisk);
5660         if (mddev->queue)
5661                 blk_cleanup_queue(mddev->queue);
5662         if (mddev->gendisk)
5663                 put_disk(mddev->gendisk);
5664         percpu_ref_exit(&mddev->writes_pending);
5665
5666         bioset_exit(&mddev->bio_set);
5667         bioset_exit(&mddev->sync_set);
5668         kfree(mddev);
5669 }
5670
5671 static const struct sysfs_ops md_sysfs_ops = {
5672         .show   = md_attr_show,
5673         .store  = md_attr_store,
5674 };
5675 static struct kobj_type md_ktype = {
5676         .release        = md_free,
5677         .sysfs_ops      = &md_sysfs_ops,
5678         .default_attrs  = md_default_attrs,
5679 };
5680
5681 int mdp_major = 0;
5682
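/*
 * Deferred sysfs/kobject teardown.  This runs from md_misc_wq via
 * mddev->del_work (scheduled when the array is finally discarded), which
 * is why md_alloc() flushes md_misc_wq before reusing a unit.
 */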
5683 static void mddev_delayed_delete(struct work_struct *ws)
5684 {
5685         struct mddev *mddev = container_of(ws, struct mddev, del_work);
5686
5687         sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
5688         kobject_del(&mddev->kobj);
5689         kobject_put(&mddev->kobj);
5690 }
5691
5692 static void no_op(struct percpu_ref *r) {}
5693
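/*
 * writes_pending is a per-cpu refcount of in-flight normal writes (taken
 * in md_write_start() and dropped in md_write_end()).  percpu_ref_init()
 * starts the count at one, so the percpu_ref_put() below brings it to the
 * desired zero; PERCPU_REF_ALLOW_REINIT allows the ref to be re-initialised
 * later rather than being single-use.
 */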
5694 int mddev_init_writes_pending(struct mddev *mddev)
5695 {
5696         if (mddev->writes_pending.percpu_count_ptr)
5697                 return 0;
5698         if (percpu_ref_init(&mddev->writes_pending, no_op,
5699                             PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
5700                 return -ENOMEM;
5701         /* We want to start with the refcount at zero */
5702         percpu_ref_put(&mddev->writes_pending);
5703         return 0;
5704 }
5705 EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
5706
5707 static int md_alloc(dev_t dev, char *name)
5708 {
5709         /*
5710          * If dev is zero, name is the name of a device to allocate with
5711          * an arbitrary minor number.  It will be "md_???"
5712          * If dev is non-zero it must be a device number with a MAJOR of
5713          * MD_MAJOR or mdp_major.  In this case, if "name" is NULL, then
5714          * the device is being created by opening a node in /dev.
5715          * If "name" is not NULL, the device is being created by
5716          * writing to /sys/module/md_mod/parameters/new_array.
5717          */
5718         static DEFINE_MUTEX(disks_mutex);
5719         struct mddev *mddev = mddev_find_or_alloc(dev);
5720         struct gendisk *disk;
5721         int partitioned;
5722         int shift;
5723         int unit;
5724         int error;
5725
5726         if (!mddev)
5727                 return -ENODEV;
5728
5729         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5730         shift = partitioned ? MdpMinorShift : 0;
5731         unit = MINOR(mddev->unit) >> shift;
5732
5733         /* wait for any previous instance of this device to be
5734          * completely removed (mddev_delayed_delete).
5735          */
5736         flush_workqueue(md_misc_wq);
5737         flush_workqueue(md_rdev_misc_wq);
5738
5739         mutex_lock(&disks_mutex);
5740         error = -EEXIST;
5741         if (mddev->gendisk)
5742                 goto abort;
5743
5744         if (name && !dev) {
5745                 /* Need to ensure that 'name' is not a duplicate.
5746                  */
5747                 struct mddev *mddev2;
5748                 spin_lock(&all_mddevs_lock);
5749
5750                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5751                         if (mddev2->gendisk &&
5752                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
5753                                 spin_unlock(&all_mddevs_lock);
5754                                 goto abort;
5755                         }
5756                 spin_unlock(&all_mddevs_lock);
5757         }
5758         if (name && dev)
5759                 /*
5760                  * Creating /dev/mdNNN via "new_array", so adjust hold_active.
5761                  */
5762                 mddev->hold_active = UNTIL_STOP;
5763
5764         error = -ENOMEM;
5765         mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
5766         if (!mddev->queue)
5767                 goto abort;
5768
5769         blk_set_stacking_limits(&mddev->queue->limits);
5770
5771         disk = alloc_disk(1 << shift);
5772         if (!disk) {
5773                 blk_cleanup_queue(mddev->queue);
5774                 mddev->queue = NULL;
5775                 goto abort;
5776         }
5777         disk->major = MAJOR(mddev->unit);
5778         disk->first_minor = unit << shift;
5779         if (name)
5780                 strcpy(disk->disk_name, name);
5781         else if (partitioned)
5782                 sprintf(disk->disk_name, "md_d%d", unit);
5783         else
5784                 sprintf(disk->disk_name, "md%d", unit);
5785         disk->fops = &md_fops;
5786         disk->private_data = mddev;
5787         disk->queue = mddev->queue;
5788         blk_queue_write_cache(mddev->queue, true, true);
5789         /* Allow extended partitions.  This makes the
5790          * 'mdp' device redundant, but we can't really
5791          * remove it now.
5792          */
5793         disk->flags |= GENHD_FL_EXT_DEVT;
5794         disk->events |= DISK_EVENT_MEDIA_CHANGE;
5795         mddev->gendisk = disk;
5796         add_disk(disk);
5797
5798         error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
5799         if (error) {
5800                 /* This isn't possible, but as kobject_add() is marked
5801                  * __must_check, we must do something with the result
5802                  */
5803                 pr_debug("md: cannot register %s/md - name in use\n",
5804                          disk->disk_name);
5805                 error = 0;
5806         }
5807         if (mddev->kobj.sd &&
5808             sysfs_create_group(&mddev->kobj, &md_bitmap_group))
5809                 pr_debug("pointless warning\n");
5810  abort:
5811         mutex_unlock(&disks_mutex);
5812         if (!error && mddev->kobj.sd) {
5813                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
5814                 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5815                 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
5816         }
5817         mddev_put(mddev);
5818         return error;
5819 }
5820
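/*
 * Auto-creation hook: when the create_on_open module parameter is set,
 * opening a /dev/mdX node allocates the corresponding array on demand
 * (md_alloc() with a NULL name).
 */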
5821 static struct kobject *md_probe(dev_t dev, int *part, void *data)
5822 {
5823         if (create_on_open)
5824                 md_alloc(dev, NULL);
5825         return NULL;
5826 }
5827
5828 static int add_named_array(const char *val, const struct kernel_param *kp)
5829 {
5830         /*
5831          * val must be "md_*" or "mdNNN".
5832          * For "md_*" we allocate an array with a large free minor number, and
5833          * set the name to val.  val must not already be an active name.
5834          * For "mdNNN" we allocate an array with the minor number NNN
5835          * which must not already be in use.
5836          */
5837         int len = strlen(val);
5838         char buf[DISK_NAME_LEN];
5839         unsigned long devnum;
5840
5841         while (len && val[len-1] == '\n')
5842                 len--;
5843         if (len >= DISK_NAME_LEN)
5844                 return -E2BIG;
5845         strlcpy(buf, val, len+1);
5846         if (strncmp(buf, "md_", 3) == 0)
5847                 return md_alloc(0, buf);
5848         if (strncmp(buf, "md", 2) == 0 &&
5849             isdigit(buf[2]) &&
5850             kstrtoul(buf+2, 10, &devnum) == 0 &&
5851             devnum <= MINORMASK)
5852                 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
5853
5854         return -EINVAL;
5855 }
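/*
 * Usage sketch for the new_array parameter handled above (illustrative
 * names only):
 *
 *     echo md_home > /sys/module/md_mod/parameters/new_array
 *         -> md_alloc(0, "md_home"), arbitrary free minor
 *     echo md127 > /sys/module/md_mod/parameters/new_array
 *         -> md_alloc(MKDEV(MD_MAJOR, 127), NULL)
 */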
5856
5857 static void md_safemode_timeout(struct timer_list *t)
5858 {
5859         struct mddev *mddev = from_timer(mddev, t, safemode_timer);
5860
5861         mddev->safemode = 1;
5862         if (mddev->external)
5863                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5864
5865         md_wakeup_thread(mddev->thread);
5866 }
5867
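/*
 * Defaults to 0; set via the "start_dirty_degraded" module parameter
 * (declared with the other module_param()s elsewhere in this file) and
 * copied into mddev->ok_start_degraded by md_run() below.
 */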
5868 static int start_dirty_degraded;
5869
5870 int md_run(struct mddev *mddev)
5871 {
5872         int err;
5873         struct md_rdev *rdev;
5874         struct md_personality *pers;
5875
5876         if (list_empty(&mddev->disks))
5877                 /* cannot run an array with no devices. */
5878                 return -EINVAL;
5879
5880         if (mddev->pers)
5881                 return -EBUSY;
5882         /* Cannot run until previous stop completes properly */
5883         if (mddev->sysfs_active)
5884                 return -EBUSY;
5885
5886         /*
5887          * Analyze all RAID superblock(s)
5888          */
5889         if (!mddev->raid_disks) {
5890                 if (!mddev->persistent)
5891                         return -EINVAL;
5892                 err = analyze_sbs(mddev);
5893                 if (err)
5894                         return -EINVAL;
5895         }
5896
5897         if (mddev->level != LEVEL_NONE)
5898                 request_module("md-level-%d", mddev->level);
5899         else if (mddev->clevel[0])
5900                 request_module("md-%s", mddev->clevel);
5901
5902         /*
5903          * Drop all container device buffers, from now on
5904          * the only valid external interface is through the md
5905          * device.
5906          */
5907         mddev->has_superblocks = false;
5908         rdev_for_each(rdev, mddev) {
5909                 if (test_bit(Faulty, &rdev->flags))
5910                         continue;
5911                 sync_blockdev(rdev->bdev);
5912                 invalidate_bdev(rdev->bdev);
5913                 if (mddev->ro != 1 &&
5914                     (bdev_read_only(rdev->bdev) ||
5915                      bdev_read_only(rdev->meta_bdev))) {
5916                         mddev->ro = 1;
5917                         if (mddev->gendisk)
5918                                 set_disk_ro(mddev->gendisk, 1);
5919                 }
5920
5921                 if (rdev->sb_page)
5922                         mddev->has_superblocks = true;
5923
5924                 /* Perform some consistency tests on the device.
5925                  * We don't want the data to overlap the metadata;
5926                  * internal bitmap issues have been handled elsewhere.
5927                  */
5928                 if (rdev->meta_bdev) {
5929                         /* Nothing to check */;
5930                 } else if (rdev->data_offset < rdev->sb_start) {
5931                         if (mddev->dev_sectors &&
5932                             rdev->data_offset + mddev->dev_sectors
5933                             > rdev->sb_start) {
5934                                 pr_warn("md: %s: data overlaps metadata\n",
5935                                         mdname(mddev));
5936                                 return -EINVAL;
5937                         }
5938                 } else {
5939                         if (rdev->sb_start + rdev->sb_size/512
5940                             > rdev->data_offset) {
5941                                 pr_warn("md: %s: metadata overlaps data\n",
5942                                         mdname(mddev));
5943                                 return -EINVAL;
5944                         }
5945                 }
5946                 sysfs_notify_dirent_safe(rdev->sysfs_state);
5947         }
5948
5949         if (!bioset_initialized(&mddev->bio_set)) {
5950                 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5951                 if (err)
5952                         return err;
5953         }
5954         if (!bioset_initialized(&mddev->sync_set)) {
5955                 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5956                 if (err)
5957                         return err;
5958         }
5959
5960         spin_lock(&pers_lock);
5961         pers = find_pers(mddev->level, mddev->clevel);
5962         if (!pers || !try_module_get(pers->owner)) {
5963                 spin_unlock(&pers_lock);
5964                 if (mddev->level != LEVEL_NONE)
5965                         pr_warn("md: personality for level %d is not loaded!\n",
5966                                 mddev->level);
5967                 else
5968                         pr_warn("md: personality for level %s is not loaded!\n",
5969                                 mddev->clevel);
5970                 err = -EINVAL;
5971                 goto abort;
5972         }
5973         spin_unlock(&pers_lock);
5974         if (mddev->level != pers->level) {
5975                 mddev->level = pers->level;
5976                 mddev->new_level = pers->level;
5977         }
5978         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5979
5980         if (mddev->reshape_position != MaxSector &&
5981             pers->start_reshape == NULL) {
5982                 /* This personality cannot handle reshaping... */
5983                 module_put(pers->owner);
5984                 err = -EINVAL;
5985                 goto abort;
5986         }
5987
5988         if (pers->sync_request) {
5989                 /* Warn if this is a potentially silly
5990                  * configuration.
5991                  */
5992                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5993                 struct md_rdev *rdev2;
5994                 int warned = 0;
5995
5996                 rdev_for_each(rdev, mddev)
5997                         rdev_for_each(rdev2, mddev) {
5998                                 if (rdev < rdev2 &&
5999                                     rdev->bdev->bd_disk ==
6000                                     rdev2->bdev->bd_disk) {
6001                                         pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
6002                                                 mdname(mddev),
6003                                                 bdevname(rdev->bdev,b),
6004                                                 bdevname(rdev2->bdev,b2));
6005                                         warned = 1;
6006                                 }
6007                         }
6008
6009                 if (warned)
6010                         pr_warn("True protection against single-disk failure might be compromised.\n");
6011         }
6012
6013         mddev->recovery = 0;
6014         /* may be overridden by the personality */
6015         mddev->resync_max_sectors = mddev->dev_sectors;
6016
6017         mddev->ok_start_degraded = start_dirty_degraded;
6018
6019         if (start_readonly && mddev->ro == 0)
6020                 mddev->ro = 2; /* read-only, but switch on first write */
6021
6022         err = pers->run(mddev);
6023         if (err)
6024                 pr_warn("md: pers->run() failed ...\n");
6025         else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
6026                 WARN_ONCE(!mddev->external_size,
6027                           "%s: default size too small, but 'external_size' not in effect?\n",
6028                           __func__);
6029                 pr_warn("md: invalid array_size %llu > default size %llu\n",
6030                         (unsigned long long)mddev->array_sectors / 2,
6031                         (unsigned long long)pers->size(mddev, 0, 0) / 2);
6032                 err = -EINVAL;
6033         }
6034         if (err == 0 && pers->sync_request &&
6035             (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
6036                 struct bitmap *bitmap;
6037
6038                 bitmap = md_bitmap_create(mddev, -1);
6039                 if (IS_ERR(bitmap)) {
6040                         err = PTR_ERR(bitmap);
6041                         pr_warn("%s: failed to create bitmap (%d)\n",
6042                                 mdname(mddev), err);
6043                 } else
6044                         mddev->bitmap = bitmap;
6045
6046         }
6047         if (err)
6048                 goto bitmap_abort;
6049
6050         if (mddev->bitmap_info.max_write_behind > 0) {
6051                 bool create_pool = false;
6052
6053                 rdev_for_each(rdev, mddev) {
6054                         if (test_bit(WriteMostly, &rdev->flags) &&
6055                             rdev_init_serial(rdev))
6056                                 create_pool = true;
6057                 }
6058                 if (create_pool && mddev->serial_info_pool == NULL) {
6059                         mddev->serial_info_pool =
6060                                 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
6061                                                     sizeof(struct serial_info));
6062                         if (!mddev->serial_info_pool) {
6063                                 err = -ENOMEM;
6064                                 goto bitmap_abort;
6065                         }
6066                 }
6067         }
6068
6069         if (mddev->queue) {
6070                 bool nonrot = true;
6071
6072                 rdev_for_each(rdev, mddev) {
6073                         if (rdev->raid_disk >= 0 &&
6074                             !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
6075                                 nonrot = false;
6076                                 break;
6077                         }
6078                 }
6079                 if (mddev->degraded)
6080                         nonrot = false;
6081                 if (nonrot)
6082                         blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
6083                 else
6084                         blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
6085         }
6086         if (pers->sync_request) {
6087                 if (mddev->kobj.sd &&
6088                     sysfs_create_group(&mddev->kobj, &md_redundancy_group))
6089                         pr_warn("md: cannot register extra attributes for %s\n",
6090                                 mdname(mddev));
6091                 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
6092                 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
6093                 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
6094         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
6095                 mddev->ro = 0;
6096
6097         atomic_set(&mddev->max_corr_read_errors,
6098                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
6099         mddev->safemode = 0;
6100         if (mddev_is_clustered(mddev))
6101                 mddev->safemode_delay = 0;
6102         else
6103                 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
6104         mddev->in_sync = 1;
6105         smp_wmb();
6106         spin_lock(&mddev->lock);
6107         mddev->pers = pers;
6108         spin_unlock(&mddev->lock);
6109         rdev_for_each(rdev, mddev)
6110                 if (rdev->raid_disk >= 0)
6111                         sysfs_link_rdev(mddev, rdev); /* failure here is OK */
6112
6113         if (mddev->degraded && !mddev->ro)
6114                 /* This ensures that recovering status is reported immediately
6115                  * via sysfs - until a lack of spares is confirmed.
6116                  */
6117                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6118         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6119
6120         if (mddev->sb_flags)
6121                 md_update_sb(mddev, 0);
6122
6123         md_new_event(mddev);
6124         return 0;
6125
6126 bitmap_abort:
6127         mddev_detach(mddev);
6128         if (mddev->private)
6129                 pers->free(mddev, mddev->private);
6130         mddev->private = NULL;
6131         module_put(pers->owner);
6132         md_bitmap_destroy(mddev);
6133 abort:
6134         bioset_exit(&mddev->bio_set);
6135         bioset_exit(&mddev->sync_set);
6136         return err;
6137 }
6138 EXPORT_SYMBOL_GPL(md_run);
6139
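/*
 * do_md_run(): md_run() plus the steps that make the array usable as a
 * block device - loading the bitmap, waking the md/sync threads, setting
 * the gendisk capacity and emitting the KOBJ_CHANGE uevent.  MD_NOT_READY
 * is held set for the whole sequence so the array is not treated as ready
 * before setup completes.
 */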
6140 int do_md_run(struct mddev *mddev)
6141 {
6142         int err;
6143
6144         set_bit(MD_NOT_READY, &mddev->flags);
6145         err = md_run(mddev);
6146         if (err)
6147                 goto out;
6148         err = md_bitmap_load(mddev);
6149         if (err) {
6150                 md_bitmap_destroy(mddev);
6151                 goto out;
6152         }
6153
6154         if (mddev_is_clustered(mddev))
6155                 md_allow_write(mddev);
6156
6157         /* run start up tasks that require md_thread */
6158         md_start(mddev);
6159
6160         md_wakeup_thread(mddev->thread);
6161         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
6162
6163         set_capacity(mddev->gendisk, mddev->array_sectors);
6164         revalidate_disk_size(mddev->gendisk, true);
6165         clear_bit(MD_NOT_READY, &mddev->flags);
6166         mddev->changed = 1;
6167         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
6168         sysfs_notify_dirent_safe(mddev->sysfs_state);
6169         sysfs_notify_dirent_safe(mddev->sysfs_action);
6170         sysfs_notify_dirent_safe(mddev->sysfs_degraded);
6171 out:
6172         clear_bit(MD_NOT_READY, &mddev->flags);
6173         return err;
6174 }
6175
6176 int md_start(struct mddev *mddev)
6177 {
6178         int ret = 0;
6179
6180         if (mddev->pers->start) {
6181                 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6182                 md_wakeup_thread(mddev->thread);
6183                 ret = mddev->pers->start(mddev);
6184                 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6185                 md_wakeup_thread(mddev->sync_thread);
6186         }
6187         return ret;
6188 }
6189 EXPORT_SYMBOL_GPL(md_start);
6190
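/*
 * restart_array(): switch a running, read-only array back to read-write.
 * Refused while a required journal device is missing/faulty or while any
 * member device is itself read-only; on success, recovery/resync is kicked
 * off again.
 */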
6191 static int restart_array(struct mddev *mddev)
6192 {
6193         struct gendisk *disk = mddev->gendisk;
6194         struct md_rdev *rdev;
6195         bool has_journal = false;
6196         bool has_readonly = false;
6197
6198         /* Complain if it has no devices */
6199         if (list_empty(&mddev->disks))
6200                 return -ENXIO;
6201         if (!mddev->pers)
6202                 return -EINVAL;
6203         if (!mddev->ro)
6204                 return -EBUSY;
6205
6206         rcu_read_lock();
6207         rdev_for_each_rcu(rdev, mddev) {
6208                 if (test_bit(Journal, &rdev->flags) &&
6209                     !test_bit(Faulty, &rdev->flags))
6210                         has_journal = true;
6211                 if (bdev_read_only(rdev->bdev))
6212                         has_readonly = true;
6213         }
6214         rcu_read_unlock();
6215         if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
6216                 /* Don't restart rw with journal missing/faulty */
6217                 return -EINVAL;
6218         if (has_readonly)
6219                 return -EROFS;
6220
6221         mddev->safemode = 0;
6222         mddev->ro = 0;
6223         set_disk_ro(disk, 0);
6224         pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
6225         /* Kick recovery or resync if necessary */
6226         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6227         md_wakeup_thread(mddev->thread);
6228         md_wakeup_thread(mddev->sync_thread);
6229         sysfs_notify_dirent_safe(mddev->sysfs_state);
6230         return 0;
6231 }
6232
6233 static void md_clean(struct mddev *mddev)
6234 {
6235         mddev->array_sectors = 0;
6236         mddev->external_size = 0;
6237         mddev->dev_sectors = 0;
6238         mddev->raid_disks = 0;
6239         mddev->recovery_cp = 0;
6240         mddev->resync_min = 0;
6241         mddev->resync_max = MaxSector;
6242         mddev->reshape_position = MaxSector;
6243         mddev->external = 0;
6244         mddev->persistent = 0;
6245         mddev->level = LEVEL_NONE;
6246         mddev->clevel[0] = 0;
6247         /*
6248          * Don't clear MD_CLOSING, or mddev can be opened again.
6249          * 'hold_active != 0' means mddev is still in the creation
6250          * process and will be used later.
6251          */
6252         if (mddev->hold_active)
6253                 mddev->flags = 0;
6254         else
6255                 mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
6256         mddev->sb_flags = 0;
6257         mddev->ro = 0;
6258         mddev->metadata_type[0] = 0;
6259         mddev->chunk_sectors = 0;
6260         mddev->ctime = mddev->utime = 0;
6261         mddev->layout = 0;
6262         mddev->max_disks = 0;
6263         mddev->events = 0;
6264         mddev->can_decrease_events = 0;
6265         mddev->delta_disks = 0;
6266         mddev->reshape_backwards = 0;
6267         mddev->new_level = LEVEL_NONE;
6268         mddev->new_layout = 0;
6269         mddev->new_chunk_sectors = 0;
6270         mddev->curr_resync = 0;
6271         atomic64_set(&mddev->resync_mismatches, 0);
6272         mddev->suspend_lo = mddev->suspend_hi = 0;
6273         mddev->sync_speed_min = mddev->sync_speed_max = 0;
6274         mddev->recovery = 0;
6275         mddev->in_sync = 0;
6276         mddev->changed = 0;
6277         mddev->degraded = 0;
6278         mddev->safemode = 0;
6279         mddev->private = NULL;
6280         mddev->cluster_info = NULL;
6281         mddev->bitmap_info.offset = 0;
6282         mddev->bitmap_info.default_offset = 0;
6283         mddev->bitmap_info.default_space = 0;
6284         mddev->bitmap_info.chunksize = 0;
6285         mddev->bitmap_info.daemon_sleep = 0;
6286         mddev->bitmap_info.max_write_behind = 0;
6287         mddev->bitmap_info.nodes = 0;
6288 }
6289
6290 static void __md_stop_writes(struct mddev *mddev)
6291 {
6292         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6293         if (work_pending(&mddev->del_work))
6294                 flush_workqueue(md_misc_wq);
6295         if (mddev->sync_thread) {
6296                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6297                 md_reap_sync_thread(mddev);
6298         }
6299
6300         del_timer_sync(&mddev->safemode_timer);
6301
6302         if (mddev->pers && mddev->pers->quiesce) {
6303                 mddev->pers->quiesce(mddev, 1);
6304                 mddev->pers->quiesce(mddev, 0);
6305         }
6306         md_bitmap_flush(mddev);
6307
6308         if (mddev->ro == 0 &&
6309             ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
6310              mddev->sb_flags)) {
6311                 /* mark array as shutdown cleanly */
6312                 if (!mddev_is_clustered(mddev))
6313                         mddev->in_sync = 1;
6314                 md_update_sb(mddev, 1);
6315         }
6316         /* disable the policy so that rdevs free their serialization resources */
6317         mddev->serialize_policy = 0;
6318         mddev_destroy_serial_pool(mddev, NULL, true);
6319 }
6320
6321 void md_stop_writes(struct mddev *mddev)
6322 {
6323         mddev_lock_nointr(mddev);
6324         __md_stop_writes(mddev);
6325         mddev_unlock(mddev);
6326 }
6327 EXPORT_SYMBOL_GPL(md_stop_writes);
6328
6329 static void mddev_detach(struct mddev *mddev)
6330 {
6331         md_bitmap_wait_behind_writes(mddev);
6332         if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
6333                 mddev->pers->quiesce(mddev, 1);
6334                 mddev->pers->quiesce(mddev, 0);
6335         }
6336         md_unregister_thread(&mddev->thread);
6337         if (mddev->queue)
6338                 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
6339 }
6340
6341 static void __md_stop(struct mddev *mddev)
6342 {
6343         struct md_personality *pers = mddev->pers;
6344         md_bitmap_destroy(mddev);
6345         mddev_detach(mddev);
6346         /* Ensure ->event_work is done */
6347         if (mddev->event_work.func)
6348                 flush_workqueue(md_misc_wq);
6349         spin_lock(&mddev->lock);
6350         mddev->pers = NULL;
6351         spin_unlock(&mddev->lock);
6352         pers->free(mddev, mddev->private);
6353         mddev->private = NULL;
6354         if (pers->sync_request && mddev->to_remove == NULL)
6355                 mddev->to_remove = &md_redundancy_group;
6356         module_put(pers->owner);
6357         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6358 }
6359
6360 void md_stop(struct mddev *mddev)
6361 {
6362         lockdep_assert_held(&mddev->reconfig_mutex);
6363
6364         /* Stop the array and free any attached data structures.
6365          * This is called from dm-raid.
6366          */
6367         __md_stop_writes(mddev);
6368         __md_stop(mddev);
6369         bioset_exit(&mddev->bio_set);
6370         bioset_exit(&mddev->sync_set);
6371 }
6372
6373 EXPORT_SYMBOL_GPL(md_stop);
6374
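/*
 * md_set_readonly(): freeze recovery, wait for any running resync and for
 * pending superblock updates, then flip the array to read-only provided
 * nobody else holds it open.  Returns -EBUSY if the array is still in use
 * and -ENXIO if it is already read-only.
 */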
6375 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
6376 {
6377         int err = 0;
6378         int did_freeze = 0;
6379
6380         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6381                 did_freeze = 1;
6382                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6383                 md_wakeup_thread(mddev->thread);
6384         }
6385         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6386                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6387         if (mddev->sync_thread)
6388                 /* Thread might be blocked waiting for metadata update
6389                  * which will now never happen */
6390                 wake_up_process(mddev->sync_thread->tsk);
6391
6392         if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
6393                 return -EBUSY;
6394         mddev_unlock(mddev);
6395         wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
6396                                           &mddev->recovery));
6397         wait_event(mddev->sb_wait,
6398                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
6399         mddev_lock_nointr(mddev);
6400
6401         mutex_lock(&mddev->open_mutex);
6402         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
6403             mddev->sync_thread ||
6404             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6405                 pr_warn("md: %s still in use.\n",mdname(mddev));
6406                 if (did_freeze) {
6407                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6408                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6409                         md_wakeup_thread(mddev->thread);
6410                 }
6411                 err = -EBUSY;
6412                 goto out;
6413         }
6414         if (mddev->pers) {
6415                 __md_stop_writes(mddev);
6416
6417                 err  = -ENXIO;
6418                 if (mddev->ro==1)
6419                         goto out;
6420                 mddev->ro = 1;
6421                 set_disk_ro(mddev->gendisk, 1);
6422                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6423                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6424                 md_wakeup_thread(mddev->thread);
6425                 sysfs_notify_dirent_safe(mddev->sysfs_state);
6426                 err = 0;
6427         }
6428 out:
6429         mutex_unlock(&mddev->open_mutex);
6430         return err;
6431 }
6432
6433 /* mode:
6434  *   0 - completely stop and dis-assemble array
6435  *   2 - stop but do not disassemble array
6436  */
6437 static int do_md_stop(struct mddev *mddev, int mode,
6438                       struct block_device *bdev)
6439 {
6440         struct gendisk *disk = mddev->gendisk;
6441         struct md_rdev *rdev;
6442         int did_freeze = 0;
6443
6444         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6445                 did_freeze = 1;
6446                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6447                 md_wakeup_thread(mddev->thread);
6448         }
6449         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6450                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6451         if (mddev->sync_thread)
6452                 /* Thread might be blocked waiting for metadata update
6453                  * which will now never happen */
6454                 wake_up_process(mddev->sync_thread->tsk);
6455
6456         mddev_unlock(mddev);
6457         wait_event(resync_wait, (mddev->sync_thread == NULL &&
6458                                  !test_bit(MD_RECOVERY_RUNNING,
6459                                            &mddev->recovery)));
6460         mddev_lock_nointr(mddev);
6461
6462         mutex_lock(&mddev->open_mutex);
6463         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
6464             mddev->sysfs_active ||
6465             mddev->sync_thread ||
6466             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6467                 pr_warn("md: %s still in use.\n",mdname(mddev));
6468                 mutex_unlock(&mddev->open_mutex);
6469                 if (did_freeze) {
6470                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6471                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6472                         md_wakeup_thread(mddev->thread);
6473                 }
6474                 return -EBUSY;
6475         }
6476         if (mddev->pers) {
6477                 if (mddev->ro)
6478                         set_disk_ro(disk, 0);
6479
6480                 __md_stop_writes(mddev);
6481                 __md_stop(mddev);
6482
6483                 /* tell userspace to handle 'inactive' */
6484                 sysfs_notify_dirent_safe(mddev->sysfs_state);
6485
6486                 rdev_for_each(rdev, mddev)
6487                         if (rdev->raid_disk >= 0)
6488                                 sysfs_unlink_rdev(mddev, rdev);
6489
6490                 set_capacity(disk, 0);
6491                 mutex_unlock(&mddev->open_mutex);
6492                 mddev->changed = 1;
6493                 revalidate_disk_size(disk, true);
6494
6495                 if (mddev->ro)
6496                         mddev->ro = 0;
6497         } else
6498                 mutex_unlock(&mddev->open_mutex);
6499         /*
6500          * Free resources if final stop
6501          */
6502         if (mode == 0) {
6503                 pr_info("md: %s stopped.\n", mdname(mddev));
6504
6505                 if (mddev->bitmap_info.file) {
6506                         struct file *f = mddev->bitmap_info.file;
6507                         spin_lock(&mddev->lock);
6508                         mddev->bitmap_info.file = NULL;
6509                         spin_unlock(&mddev->lock);
6510                         fput(f);
6511                 }
6512                 mddev->bitmap_info.offset = 0;
6513
6514                 export_array(mddev);
6515
6516                 md_clean(mddev);
6517                 if (mddev->hold_active == UNTIL_STOP)
6518                         mddev->hold_active = 0;
6519         }
6520         md_new_event(mddev);
6521         sysfs_notify_dirent_safe(mddev->sysfs_state);
6522         return 0;
6523 }
6524
6525 #ifndef MODULE
6526 static void autorun_array(struct mddev *mddev)
6527 {
6528         struct md_rdev *rdev;
6529         int err;
6530
6531         if (list_empty(&mddev->disks))
6532                 return;
6533
6534         pr_info("md: running: ");
6535
6536         rdev_for_each(rdev, mddev) {
6537                 char b[BDEVNAME_SIZE];
6538                 pr_cont("<%s>", bdevname(rdev->bdev,b));
6539         }
6540         pr_cont("\n");
6541
6542         err = do_md_run(mddev);
6543         if (err) {
6544                 pr_warn("md: do_md_run() returned %d\n", err);
6545                 do_md_stop(mddev, 0, NULL);
6546         }
6547 }
6548
6549 /*
6550  * Let's try to run arrays based on all disks that have arrived
6551  * until now (those are in pending_raid_disks).
6552  *
6553  * the method: pick the first pending disk, collect all disks with
6554  * the same UUID, remove all from the pending list and put them into
6555  * the 'same_array' list. Then order this list based on superblock
6556  * update time (freshest comes first), kick out 'old' disks and
6557  * compare superblocks. If everything's fine then run it.
6558  *
6559  * If "unit" is allocated, then bump its reference count
6560  */
6561 static void autorun_devices(int part)
6562 {
6563         struct md_rdev *rdev0, *rdev, *tmp;
6564         struct mddev *mddev;
6565         char b[BDEVNAME_SIZE];
6566
6567         pr_info("md: autorun ...\n");
6568         while (!list_empty(&pending_raid_disks)) {
6569                 int unit;
6570                 dev_t dev;
6571                 LIST_HEAD(candidates);
6572                 rdev0 = list_entry(pending_raid_disks.next,
6573                                          struct md_rdev, same_set);
6574
6575                 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
6576                 INIT_LIST_HEAD(&candidates);
6577                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
6578                         if (super_90_load(rdev, rdev0, 0) >= 0) {
6579                                 pr_debug("md:  adding %s ...\n",
6580                                          bdevname(rdev->bdev,b));
6581                                 list_move(&rdev->same_set, &candidates);
6582                         }
6583                 /*
6584                  * now we have a set of devices, with all of them having
6585                  * mostly sane superblocks. It's time to allocate the
6586                  * mddev.
6587                  */
6588                 if (part) {
6589                         dev = MKDEV(mdp_major,
6590                                     rdev0->preferred_minor << MdpMinorShift);
6591                         unit = MINOR(dev) >> MdpMinorShift;
6592                 } else {
6593                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
6594                         unit = MINOR(dev);
6595                 }
6596                 if (rdev0->preferred_minor != unit) {
6597                         pr_warn("md: unit number in %s is bad: %d\n",
6598                                 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
6599                         break;
6600                 }
6601
6602                 md_probe(dev, NULL, NULL);
6603                 mddev = mddev_find(dev);
6604                 if (!mddev)
6605                         break;
6606
6607                 if (mddev_lock(mddev))
6608                         pr_warn("md: %s locked, cannot run\n", mdname(mddev));
6609                 else if (mddev->raid_disks || mddev->major_version
6610                          || !list_empty(&mddev->disks)) {
6611                         pr_warn("md: %s already running, cannot run %s\n",
6612                                 mdname(mddev), bdevname(rdev0->bdev,b));
6613                         mddev_unlock(mddev);
6614                 } else {
6615                         pr_debug("md: created %s\n", mdname(mddev));
6616                         mddev->persistent = 1;
6617                         rdev_for_each_list(rdev, tmp, &candidates) {
6618                                 list_del_init(&rdev->same_set);
6619                                 if (bind_rdev_to_array(rdev, mddev))
6620                                         export_rdev(rdev);
6621                         }
6622                         autorun_array(mddev);
6623                         mddev_unlock(mddev);
6624                 }
6625                 /* On success, candidates will be empty; on error
6626                  * it won't be...
6627                  */
6628                 rdev_for_each_list(rdev, tmp, &candidates) {
6629                         list_del_init(&rdev->same_set);
6630                         export_rdev(rdev);
6631                 }
6632                 mddev_put(mddev);
6633         }
6634         pr_info("md: ... autorun DONE.\n");
6635 }
6636 #endif /* !MODULE */
6637
6638 static int get_version(void __user *arg)
6639 {
6640         mdu_version_t ver;
6641
6642         ver.major = MD_MAJOR_VERSION;
6643         ver.minor = MD_MINOR_VERSION;
6644         ver.patchlevel = MD_PATCHLEVEL_VERSION;
6645
6646         if (copy_to_user(arg, &ver, sizeof(ver)))
6647                 return -EFAULT;
6648
6649         return 0;
6650 }
6651
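/*
 * GET_ARRAY_INFO helper.  Note that info.size is reported in KiB
 * (dev_sectors / 2, with 512-byte sectors) and is clamped to -1 when the
 * array is too large for the 32-bit field.
 */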
6652 static int get_array_info(struct mddev *mddev, void __user *arg)
6653 {
6654         mdu_array_info_t info;
6655         int nr,working,insync,failed,spare;
6656         struct md_rdev *rdev;
6657
6658         nr = working = insync = failed = spare = 0;
6659         rcu_read_lock();
6660         rdev_for_each_rcu(rdev, mddev) {
6661                 nr++;
6662                 if (test_bit(Faulty, &rdev->flags))
6663                         failed++;
6664                 else {
6665                         working++;
6666                         if (test_bit(In_sync, &rdev->flags))
6667                                 insync++;
6668                         else if (test_bit(Journal, &rdev->flags))
6669                                 /* TODO: add journal count to md_u.h */
6670                                 ;
6671                         else
6672                                 spare++;
6673                 }
6674         }
6675         rcu_read_unlock();
6676
6677         info.major_version = mddev->major_version;
6678         info.minor_version = mddev->minor_version;
6679         info.patch_version = MD_PATCHLEVEL_VERSION;
6680         info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
6681         info.level         = mddev->level;
6682         info.size          = mddev->dev_sectors / 2;
6683         if (info.size != mddev->dev_sectors / 2) /* overflow */
6684                 info.size = -1;
6685         info.nr_disks      = nr;
6686         info.raid_disks    = mddev->raid_disks;
6687         info.md_minor      = mddev->md_minor;
6688         info.not_persistent= !mddev->persistent;
6689
6690         info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
6691         info.state         = 0;
6692         if (mddev->in_sync)
6693                 info.state = (1<<MD_SB_CLEAN);
6694         if (mddev->bitmap && mddev->bitmap_info.offset)
6695                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
6696         if (mddev_is_clustered(mddev))
6697                 info.state |= (1<<MD_SB_CLUSTERED);
6698         info.active_disks  = insync;
6699         info.working_disks = working;
6700         info.failed_disks  = failed;
6701         info.spare_disks   = spare;
6702
6703         info.layout        = mddev->layout;
6704         info.chunk_size    = mddev->chunk_sectors << 9;
6705
6706         if (copy_to_user(arg, &info, sizeof(info)))
6707                 return -EFAULT;
6708
6709         return 0;
6710 }
6711
6712 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
6713 {
6714         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
6715         char *ptr;
6716         int err;
6717
6718         file = kzalloc(sizeof(*file), GFP_NOIO);
6719         if (!file)
6720                 return -ENOMEM;
6721
6722         err = 0;
6723         spin_lock(&mddev->lock);
6724         /* bitmap enabled */
6725         if (mddev->bitmap_info.file) {
6726                 ptr = file_path(mddev->bitmap_info.file, file->pathname,
6727                                 sizeof(file->pathname));
6728                 if (IS_ERR(ptr))
6729                         err = PTR_ERR(ptr);
6730                 else
6731                         memmove(file->pathname, ptr,
6732                                 sizeof(file->pathname)-(ptr-file->pathname));
6733         }
6734         spin_unlock(&mddev->lock);
6735
6736         if (err == 0 &&
6737             copy_to_user(arg, file, sizeof(*file)))
6738                 err = -EFAULT;
6739
6740         kfree(file);
6741         return err;
6742 }
6743
6744 static int get_disk_info(struct mddev *mddev, void __user * arg)
6745 {
6746         mdu_disk_info_t info;
6747         struct md_rdev *rdev;
6748
6749         if (copy_from_user(&info, arg, sizeof(info)))
6750                 return -EFAULT;
6751
6752         rcu_read_lock();
6753         rdev = md_find_rdev_nr_rcu(mddev, info.number);
6754         if (rdev) {
6755                 info.major = MAJOR(rdev->bdev->bd_dev);
6756                 info.minor = MINOR(rdev->bdev->bd_dev);
6757                 info.raid_disk = rdev->raid_disk;
6758                 info.state = 0;
6759                 if (test_bit(Faulty, &rdev->flags))
6760                         info.state |= (1<<MD_DISK_FAULTY);
6761                 else if (test_bit(In_sync, &rdev->flags)) {
6762                         info.state |= (1<<MD_DISK_ACTIVE);
6763                         info.state |= (1<<MD_DISK_SYNC);
6764                 }
6765                 if (test_bit(Journal, &rdev->flags))
6766                         info.state |= (1<<MD_DISK_JOURNAL);
6767                 if (test_bit(WriteMostly, &rdev->flags))
6768                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
6769                 if (test_bit(FailFast, &rdev->flags))
6770                         info.state |= (1<<MD_DISK_FAILFAST);
6771         } else {
6772                 info.major = info.minor = 0;
6773                 info.raid_disk = -1;
6774                 info.state = (1<<MD_DISK_REMOVED);
6775         }
6776         rcu_read_unlock();
6777
6778         if (copy_to_user(arg, &info, sizeof(info)))
6779                 return -EFAULT;
6780
6781         return 0;
6782 }
6783
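/*
 * md_add_new_disk() covers three distinct cases:
 *   - array not yet assembled: import the device, check its superblock
 *     against the first member and bind it;
 *   - array running: hot-add a spare (or journal/cluster candidate),
 *     honouring the state bits passed in from userspace;
 *   - otherwise: legacy 0.90 build-time add, where the geometry comes
 *     straight from the mdu_disk_info_t.
 */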
6784 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
6785 {
6786         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
6787         struct md_rdev *rdev;
6788         dev_t dev = MKDEV(info->major,info->minor);
6789
6790         if (mddev_is_clustered(mddev) &&
6791                 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
6792                 pr_warn("%s: Cannot add to clustered mddev.\n",
6793                         mdname(mddev));
6794                 return -EINVAL;
6795         }
6796
6797         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6798                 return -EOVERFLOW;
6799
6800         if (!mddev->raid_disks) {
6801                 int err;
6802                 /* expecting a device which has a superblock */
6803                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6804                 if (IS_ERR(rdev)) {
6805                         pr_warn("md: md_import_device returned %ld\n",
6806                                 PTR_ERR(rdev));
6807                         return PTR_ERR(rdev);
6808                 }
6809                 if (!list_empty(&mddev->disks)) {
6810                         struct md_rdev *rdev0
6811                                 = list_entry(mddev->disks.next,
6812                                              struct md_rdev, same_set);
6813                         err = super_types[mddev->major_version]
6814                                 .load_super(rdev, rdev0, mddev->minor_version);
6815                         if (err < 0) {
6816                                 pr_warn("md: %s has different UUID to %s\n",
6817                                         bdevname(rdev->bdev,b),
6818                                         bdevname(rdev0->bdev,b2));
6819                                 export_rdev(rdev);
6820                                 return -EINVAL;
6821                         }
6822                 }
6823                 err = bind_rdev_to_array(rdev, mddev);
6824                 if (err)
6825                         export_rdev(rdev);
6826                 return err;
6827         }
6828
6829         /*
6830          * md_add_new_disk can be used once the array is assembled
6831          * to add "hot spares".  They must already have a superblock
6832          * written
6833          */
6834         if (mddev->pers) {
6835                 int err;
6836                 if (!mddev->pers->hot_add_disk) {
6837                         pr_warn("%s: personality does not support diskops!\n",
6838                                 mdname(mddev));
6839                         return -EINVAL;
6840                 }
6841                 if (mddev->persistent)
6842                         rdev = md_import_device(dev, mddev->major_version,
6843                                                 mddev->minor_version);
6844                 else
6845                         rdev = md_import_device(dev, -1, -1);
6846                 if (IS_ERR(rdev)) {
6847                         pr_warn("md: md_import_device returned %ld\n",
6848                                 PTR_ERR(rdev));
6849                         return PTR_ERR(rdev);
6850                 }
6851                 /* set saved_raid_disk if appropriate */
6852                 if (!mddev->persistent) {
6853                         if (info->state & (1<<MD_DISK_SYNC)  &&
6854                             info->raid_disk < mddev->raid_disks) {
6855                                 rdev->raid_disk = info->raid_disk;
6856                                 set_bit(In_sync, &rdev->flags);
6857                                 clear_bit(Bitmap_sync, &rdev->flags);
6858                         } else
6859                                 rdev->raid_disk = -1;
6860                         rdev->saved_raid_disk = rdev->raid_disk;
6861                 } else
6862                         super_types[mddev->major_version].
6863                                 validate_super(mddev, NULL/*freshest*/, rdev);
6864                 if ((info->state & (1<<MD_DISK_SYNC)) &&
6865                      rdev->raid_disk != info->raid_disk) {
6866                         /* This was a hot-add request, but the events count
6867                          * doesn't match, so reject it.
6868                          */
6869                         export_rdev(rdev);
6870                         return -EINVAL;
6871                 }
6872
6873                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6874                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6875                         set_bit(WriteMostly, &rdev->flags);
6876                 else
6877                         clear_bit(WriteMostly, &rdev->flags);
6878                 if (info->state & (1<<MD_DISK_FAILFAST))
6879                         set_bit(FailFast, &rdev->flags);
6880                 else
6881                         clear_bit(FailFast, &rdev->flags);
6882
6883                 if (info->state & (1<<MD_DISK_JOURNAL)) {
6884                         struct md_rdev *rdev2;
6885                         bool has_journal = false;
6886
6887                         /* make sure no existing journal disk */
6888                         rdev_for_each(rdev2, mddev) {
6889                                 if (test_bit(Journal, &rdev2->flags)) {
6890                                         has_journal = true;
6891                                         break;
6892                                 }
6893                         }
6894                         if (has_journal || mddev->bitmap) {
6895                                 export_rdev(rdev);
6896                                 return -EBUSY;
6897                         }
6898                         set_bit(Journal, &rdev->flags);
6899                 }
6900                 /*
6901                  * check whether the device shows up in other nodes
6902                  */
6903                 if (mddev_is_clustered(mddev)) {
6904                         if (info->state & (1 << MD_DISK_CANDIDATE))
6905                                 set_bit(Candidate, &rdev->flags);
6906                         else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6907                                 /* --add initiated by this node */
6908                                 err = md_cluster_ops->add_new_disk(mddev, rdev);
6909                                 if (err) {
6910                                         export_rdev(rdev);
6911                                         return err;
6912                                 }
6913                         }
6914                 }
6915
6916                 rdev->raid_disk = -1;
6917                 err = bind_rdev_to_array(rdev, mddev);
6918
6919                 if (err)
6920                         export_rdev(rdev);
6921
6922                 if (mddev_is_clustered(mddev)) {
6923                         if (info->state & (1 << MD_DISK_CANDIDATE)) {
6924                                 if (!err) {
6925                                         err = md_cluster_ops->new_disk_ack(mddev,
6926                                                 err == 0);
6927                                         if (err)
6928                                                 md_kick_rdev_from_array(rdev);
6929                                 }
6930                         } else {
6931                                 if (err)
6932                                         md_cluster_ops->add_new_disk_cancel(mddev);
6933                                 else
6934                                         err = add_bound_rdev(rdev);
6935                         }
6936
6937                 } else if (!err)
6938                         err = add_bound_rdev(rdev);
6939
6940                 return err;
6941         }
6942
6943         /* otherwise, md_add_new_disk is only allowed
6944          * for major_version==0 superblocks
6945          */
6946         if (mddev->major_version != 0) {
6947                 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
6948                 return -EINVAL;
6949         }
6950
6951         if (!(info->state & (1<<MD_DISK_FAULTY))) {
6952                 int err;
6953                 rdev = md_import_device(dev, -1, 0);
6954                 if (IS_ERR(rdev)) {
6955                         pr_warn("md: error, md_import_device() returned %ld\n",
6956                                 PTR_ERR(rdev));
6957                         return PTR_ERR(rdev);
6958                 }
6959                 rdev->desc_nr = info->number;
6960                 if (info->raid_disk < mddev->raid_disks)
6961                         rdev->raid_disk = info->raid_disk;
6962                 else
6963                         rdev->raid_disk = -1;
6964
6965                 if (rdev->raid_disk < mddev->raid_disks)
6966                         if (info->state & (1<<MD_DISK_SYNC))
6967                                 set_bit(In_sync, &rdev->flags);
6968
6969                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6970                         set_bit(WriteMostly, &rdev->flags);
6971                 if (info->state & (1<<MD_DISK_FAILFAST))
6972                         set_bit(FailFast, &rdev->flags);
6973
6974                 if (!mddev->persistent) {
6975                         pr_debug("md: nonpersistent superblock ...\n");
6976                         rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6977                 } else
6978                         rdev->sb_start = calc_dev_sboffset(rdev);
6979                 rdev->sectors = rdev->sb_start;
6980
6981                 err = bind_rdev_to_array(rdev, mddev);
6982                 if (err) {
6983                         export_rdev(rdev);
6984                         return err;
6985                 }
6986         }
6987
6988         return 0;
6989 }
6990
6991 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6992 {
6993         char b[BDEVNAME_SIZE];
6994         struct md_rdev *rdev;
6995
6996         if (!mddev->pers)
6997                 return -ENODEV;
6998
6999         rdev = find_rdev(mddev, dev);
7000         if (!rdev)
7001                 return -ENXIO;
7002
7003         if (rdev->raid_disk < 0)
7004                 goto kick_rdev;
7005
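        /*
         * The device still occupies a raid slot: clear Blocked and let
         * remove_and_add_spares() try to detach it.  If it is still in
         * its slot afterwards it is genuinely busy.
         */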
7006         clear_bit(Blocked, &rdev->flags);
7007         remove_and_add_spares(mddev, rdev);
7008
7009         if (rdev->raid_disk >= 0)
7010                 goto busy;
7011
7012 kick_rdev:
7013         if (mddev_is_clustered(mddev)) {
7014                 if (md_cluster_ops->remove_disk(mddev, rdev))
7015                         goto busy;
7016         }
7017
7018         md_kick_rdev_from_array(rdev);
7019         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7020         if (mddev->thread)
7021                 md_wakeup_thread(mddev->thread);
7022         else
7023                 md_update_sb(mddev, 1);
7024         md_new_event(mddev);
7025
7026         return 0;
7027 busy:
7028         pr_debug("md: cannot remove active disk %s from %s ...\n",
7029                  bdevname(rdev->bdev,b), mdname(mddev));
7030         return -EBUSY;
7031 }
7032
7033 static int hot_add_disk(struct mddev *mddev, dev_t dev)
7034 {
7035         char b[BDEVNAME_SIZE];
7036         int err;
7037         struct md_rdev *rdev;
7038
7039         if (!mddev->pers)
7040                 return -ENODEV;
7041
7042         if (mddev->major_version != 0) {
7043                 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
7044                         mdname(mddev));
7045                 return -EINVAL;
7046         }
7047         if (!mddev->pers->hot_add_disk) {
7048                 pr_warn("%s: personality does not support diskops!\n",
7049                         mdname(mddev));
7050                 return -EINVAL;
7051         }
7052
7053         rdev = md_import_device(dev, -1, 0);
7054         if (IS_ERR(rdev)) {
7055                 pr_warn("md: error, md_import_device() returned %ld\n",
7056                         PTR_ERR(rdev));
7057                 return -EINVAL;
7058         }
7059
7060         if (mddev->persistent)
7061                 rdev->sb_start = calc_dev_sboffset(rdev);
7062         else
7063                 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
7064
7065         rdev->sectors = rdev->sb_start;
7066
7067         if (test_bit(Faulty, &rdev->flags)) {
7068                 pr_warn("md: can not hot-add faulty %s disk to %s!\n",
7069                         bdevname(rdev->bdev,b), mdname(mddev));
7070                 err = -EINVAL;
7071                 goto abort_export;
7072         }
7073
7074         clear_bit(In_sync, &rdev->flags);
7075         rdev->desc_nr = -1;
7076         rdev->saved_raid_disk = -1;
7077         err = bind_rdev_to_array(rdev, mddev);
7078         if (err)
7079                 goto abort_export;
7080
7081         /*
7082          * The rest had better be atomic: disk failures can be
7083          * noticed in interrupt contexts ...
7084          */
7085
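        /*
         * Leave the device without a raid slot for now; the recovery
         * thread woken below will slot it in as a spare (via
         * remove_and_add_spares()) if the array needs it.
         */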
7086         rdev->raid_disk = -1;
7087
7088         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7089         if (!mddev->thread)
7090                 md_update_sb(mddev, 1);
7091         /*
7092          * Kick recovery, maybe this spare has to be added to the
7093          * array immediately.
7094          */
7095         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7096         md_wakeup_thread(mddev->thread);
7097         md_new_event(mddev);
7098         return 0;
7099
7100 abort_export:
7101         export_rdev(rdev);
7102         return err;
7103 }
7104
7105 static int set_bitmap_file(struct mddev *mddev, int fd)
7106 {
7107         int err = 0;
7108
7109         if (mddev->pers) {
7110                 if (!mddev->pers->quiesce || !mddev->thread)
7111                         return -EBUSY;
7112                 if (mddev->recovery || mddev->sync_thread)
7113                         return -EBUSY;
7114                 /* we should be able to change the bitmap.. */
7115         }
7116
7117         if (fd >= 0) {
7118                 struct inode *inode;
7119                 struct file *f;
7120
7121                 if (mddev->bitmap || mddev->bitmap_info.file)
7122                         return -EEXIST; /* cannot add when bitmap is present */
7123                 f = fget(fd);
7124
7125                 if (f == NULL) {
7126                         pr_warn("%s: error: failed to get bitmap file\n",
7127                                 mdname(mddev));
7128                         return -EBADF;
7129                 }
7130
7131                 inode = f->f_mapping->host;
7132                 if (!S_ISREG(inode->i_mode)) {
7133                         pr_warn("%s: error: bitmap file must be a regular file\n",
7134                                 mdname(mddev));
7135                         err = -EBADF;
7136                 } else if (!(f->f_mode & FMODE_WRITE)) {
7137                         pr_warn("%s: error: bitmap file must be opened for write\n",
7138                                 mdname(mddev));
7139                         err = -EBADF;
7140                 } else if (atomic_read(&inode->i_writecount) != 1) {
7141                         pr_warn("%s: error: bitmap file is already in use\n",
7142                                 mdname(mddev));
7143                         err = -EBUSY;
7144                 }
7145                 if (err) {
7146                         fput(f);
7147                         return err;
7148                 }
7149                 mddev->bitmap_info.file = f;
7150                 mddev->bitmap_info.offset = 0; /* file overrides offset */
7151         } else if (mddev->bitmap == NULL)
7152                 return -ENOENT; /* cannot remove what isn't there */
7153         err = 0;
7154         if (mddev->pers) {
7155                 if (fd >= 0) {
7156                         struct bitmap *bitmap;
7157
7158                         bitmap = md_bitmap_create(mddev, -1);
7159                         mddev_suspend(mddev);
7160                         if (!IS_ERR(bitmap)) {
7161                                 mddev->bitmap = bitmap;
7162                                 err = md_bitmap_load(mddev);
7163                         } else
7164                                 err = PTR_ERR(bitmap);
7165                         if (err) {
7166                                 md_bitmap_destroy(mddev);
7167                                 fd = -1;
7168                         }
7169                         mddev_resume(mddev);
7170                 } else if (fd < 0) {
7171                         mddev_suspend(mddev);
7172                         md_bitmap_destroy(mddev);
7173                         mddev_resume(mddev);
7174                 }
7175         }
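        /*
         * Drop our reference to a removed bitmap file last.  The pointer
         * is cleared under mddev->lock, so concurrent readers of
         * bitmap_info.file (e.g. the GET_BITMAP_FILE ioctl) see either
         * the old, still-valid file or NULL, never a stale pointer.
         */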
7176         if (fd < 0) {
7177                 struct file *f = mddev->bitmap_info.file;
7178                 if (f) {
7179                         spin_lock(&mddev->lock);
7180                         mddev->bitmap_info.file = NULL;
7181                         spin_unlock(&mddev->lock);
7182                         fput(f);
7183                 }
7184         }
7185
7186         return err;
7187 }
7188
7189 /*
7190  * md_set_array_info is used in two different ways.
7191  * The original usage is when creating a new array.
7192  * In this usage, raid_disks is > 0 and it together with
7193  *  level, size, not_persistent, layout, chunksize determine the
7194  *  shape of the array.
7195  *  This will always create an array with a type-0.90.0 superblock.
7196  * The newer usage is when assembling an array.
7197  *  In this case raid_disks will be 0, and the major_version field is
7198  *  used to determine which style super-blocks are to be found on the devices.
7199  *  The minor and patch _version numbers are also kept in case the
7200  *  super_block handler wishes to interpret them.
7201  */
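/*
 * Illustrative userspace-side sketch (not part of this driver; the fd and
 * variable names here are made up).  Field meanings follow
 * <linux/raid/md_u.h>; both calls reach this function through the
 * SET_ARRAY_INFO ioctl:
 *
 *	mdu_array_info_t info = { 0 };
 *
 *	// assemble: only select the superblock format
 *	info.major_version = 1;
 *	ioctl(md_fd, SET_ARRAY_INFO, &info);
 *
 *	// create: raid_disks > 0, full geometry, implies a 0.90 superblock
 *	info.raid_disks = 2;
 *	info.level = 1;
 *	info.size = dev_kib;		// per-device size in KiB
 *	info.chunk_size = 64 * 1024;	// in bytes
 *	ioctl(md_fd, SET_ARRAY_INFO, &info);
 */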
7202 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
7203 {
7204         if (info->raid_disks == 0) {
7205                 /* just setting version number for superblock loading */
7206                 if (info->major_version < 0 ||
7207                     info->major_version >= ARRAY_SIZE(super_types) ||
7208                     super_types[info->major_version].name == NULL) {
7209                         /* maybe try to auto-load a module? */
7210                         pr_warn("md: superblock version %d not known\n",
7211                                 info->major_version);
7212                         return -EINVAL;
7213                 }
7214                 mddev->major_version = info->major_version;
7215                 mddev->minor_version = info->minor_version;
7216                 mddev->patch_version = info->patch_version;
7217                 mddev->persistent = !info->not_persistent;
7218                 /* ensure mddev_put doesn't delete this now that there
7219                  * is some minimal configuration.
7220                  */
7221                 mddev->ctime         = ktime_get_real_seconds();
7222                 return 0;
7223         }
7224         mddev->major_version = MD_MAJOR_VERSION;
7225         mddev->minor_version = MD_MINOR_VERSION;
7226         mddev->patch_version = MD_PATCHLEVEL_VERSION;
7227         mddev->ctime         = ktime_get_real_seconds();
7228
7229         mddev->level         = info->level;
7230         mddev->clevel[0]     = 0;
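        /* info->size is a per-device size in KiB; dev_sectors is in 512-byte sectors */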
7231         mddev->dev_sectors   = 2 * (sector_t)info->size;
7232         mddev->raid_disks    = info->raid_disks;
7233         /* don't set md_minor, it is determined by which /dev/md* was
7234          * opened
7235          */
7236         if (info->state & (1<<MD_SB_CLEAN))
7237                 mddev->recovery_cp = MaxSector;
7238         else
7239                 mddev->recovery_cp = 0;
7240         mddev->persistent    = !info->not_persistent;
7241         mddev->external      = 0;
7242
7243         mddev->layout        = info->layout;
7244         if (mddev->level == 0)
7245                 /* Cannot trust RAID0 layout info here */
7246                 mddev->layout = -1;
7247         mddev->chunk_sectors = info->chunk_size >> 9;
7248
7249         if (mddev->persistent) {
7250                 mddev->max_disks = MD_SB_DISKS;
7251                 mddev->flags = 0;
7252                 mddev->sb_flags = 0;
7253         }
7254         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7255
7256         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
7257         mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
7258         mddev->bitmap_info.offset = 0;
7259
7260         mddev->reshape_position = MaxSector;
7261
7262         /*
7263          * Generate a 128 bit UUID
7264          */
7265         get_random_bytes(mddev->uuid, 16);
7266
7267         mddev->new_level = mddev->level;
7268         mddev->new_chunk_sectors = mddev->chunk_sectors;
7269         mddev->new_layout = mddev->layout;
7270         mddev->delta_disks = 0;
7271         mddev->reshape_backwards = 0;
7272
7273         return 0;
7274 }
7275
7276 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
7277 {
7278         lockdep_assert_held(&mddev->reconfig_mutex);
7279
7280         if (mddev->external_size)
7281                 return;
7282
7283         mddev->array_sectors = array_sectors;
7284 }
7285 EXPORT_SYMBOL(md_set_array_sectors);
7286
7287 static int update_size(struct mddev *mddev, sector_t num_sectors)
7288 {
7289         struct md_rdev *rdev;
7290         int rv;
7291         int fit = (num_sectors == 0);
7292         sector_t old_dev_sectors = mddev->dev_sectors;
7293
7294         if (mddev->pers->resize == NULL)
7295                 return -EINVAL;
7296         /* The "num_sectors" is the number of sectors of each device that
7297          * is used.  This can only make sense for arrays with redundancy.
7298          * linear and raid0 always use whatever space is available. We can only
7299          * consider changing this number if no resync or reconstruction is
7300          * happening, and if the new size is acceptable. It must fit before the
7301          * sb_start or, if that is <data_offset, it must fit before the size
7302          * of each device.  If num_sectors is zero, we find the largest size
7303          * that fits.
7304          */
7305         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7306             mddev->sync_thread)
7307                 return -EBUSY;
7308         if (mddev->ro)
7309                 return -EROFS;
7310
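        /*
         * With "fit" (num_sectors == 0 on entry) shrink num_sectors down to
         * the smallest member device; otherwise just check that every member
         * is large enough for the requested size.
         */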
7311         rdev_for_each(rdev, mddev) {
7312                 sector_t avail = rdev->sectors;
7313
7314                 if (fit && (num_sectors == 0 || num_sectors > avail))
7315                         num_sectors = avail;
7316                 if (avail < num_sectors)
7317                         return -ENOSPC;
7318         }
7319         rv = mddev->pers->resize(mddev, num_sectors);
7320         if (!rv) {
7321                 if (mddev_is_clustered(mddev))
7322                         md_cluster_ops->update_size(mddev, old_dev_sectors);
7323                 else if (mddev->queue) {
7324                         set_capacity(mddev->gendisk, mddev->array_sectors);
7325                         revalidate_disk_size(mddev->gendisk, true);
7326                 }
7327         }
7328         return rv;
7329 }
7330
7331 static int update_raid_disks(struct mddev *mddev, int raid_disks)
7332 {
7333         int rv;
7334         struct md_rdev *rdev;
7335         /* change the number of raid disks */
7336         if (mddev->pers->check_reshape == NULL)
7337                 return -EINVAL;
7338         if (mddev->ro)
7339                 return -EROFS;
7340         if (raid_disks <= 0 ||
7341             (mddev->max_disks && raid_disks >= mddev->max_disks))
7342                 return -EINVAL;
7343         if (mddev->sync_thread ||
7344             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7345             test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
7346             mddev->reshape_position != MaxSector)
7347                 return -EBUSY;
7348
7349         rdev_for_each(rdev, mddev) {
7350                 if (mddev->raid_disks < raid_disks &&
7351                     rdev->data_offset < rdev->new_data_offset)
7352                         return -EINVAL;
7353                 if (mddev->raid_disks > raid_disks &&
7354                     rdev->data_offset > rdev->new_data_offset)
7355                         return -EINVAL;
7356         }
7357
7358         mddev->delta_disks = raid_disks - mddev->raid_disks;
7359         if (mddev->delta_disks < 0)
7360                 mddev->reshape_backwards = 1;
7361         else if (mddev->delta_disks > 0)
7362                 mddev->reshape_backwards = 0;
7363
7364         rv = mddev->pers->check_reshape(mddev);
7365         if (rv < 0) {
7366                 mddev->delta_disks = 0;
7367                 mddev->reshape_backwards = 0;
7368         }
7369         return rv;
7370 }
7371
7372 /*
7373  * update_array_info is used to change the configuration of an
7374  * on-line array.
7375  * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
7376  * fields in the info are checked against the array.
7377  * Any differences that cannot be handled will cause an error.
7378  * Normally, only one change can be managed at a time.
7379  */
7380 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
7381 {
7382         int rv = 0;
7383         int cnt = 0;
7384         int state = 0;
7385
7386         /* calculate expected state, ignoring low bits */
7387         if (mddev->bitmap && mddev->bitmap_info.offset)
7388                 state |= (1 << MD_SB_BITMAP_PRESENT);
7389
7390         if (mddev->major_version != info->major_version ||
7391             mddev->minor_version != info->minor_version ||
7392 /*          mddev->patch_version != info->patch_version || */
7393             mddev->ctime         != info->ctime         ||
7394             mddev->level         != info->level         ||
7395 /*          mddev->layout        != info->layout        || */
7396             mddev->persistent    != !info->not_persistent ||
7397             mddev->chunk_sectors != info->chunk_size >> 9 ||
7398             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
7399             ((state^info->state) & 0xfffffe00)
7400                 )
7401                 return -EINVAL;
7402         /* Check there is only one change */
7403         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7404                 cnt++;
7405         if (mddev->raid_disks != info->raid_disks)
7406                 cnt++;
7407         if (mddev->layout != info->layout)
7408                 cnt++;
7409         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
7410                 cnt++;
7411         if (cnt == 0)
7412                 return 0;
7413         if (cnt > 1)
7414                 return -EINVAL;
7415
7416         if (mddev->layout != info->layout) {
7417                 /* Change layout
7418                  * we don't need to do anything at the md level, the
7419                  * personality will take care of it all.
7420                  */
7421                 if (mddev->pers->check_reshape == NULL)
7422                         return -EINVAL;
7423                 else {
7424                         mddev->new_layout = info->layout;
7425                         rv = mddev->pers->check_reshape(mddev);
7426                         if (rv)
7427                                 mddev->new_layout = mddev->layout;
7428                         return rv;
7429                 }
7430         }
7431         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7432                 rv = update_size(mddev, (sector_t)info->size * 2);
7433
7434         if (mddev->raid_disks    != info->raid_disks)
7435                 rv = update_raid_disks(mddev, info->raid_disks);
7436
7437         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
7438                 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
7439                         rv = -EINVAL;
7440                         goto err;
7441                 }
7442                 if (mddev->recovery || mddev->sync_thread) {
7443                         rv = -EBUSY;
7444                         goto err;
7445                 }
7446                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
7447                         struct bitmap *bitmap;
7448                         /* add the bitmap */
7449                         if (mddev->bitmap) {
7450                                 rv = -EEXIST;
7451                                 goto err;
7452                         }
7453                         if (mddev->bitmap_info.default_offset == 0) {
7454                                 rv = -EINVAL;
7455                                 goto err;
7456                         }
7457                         mddev->bitmap_info.offset =
7458                                 mddev->bitmap_info.default_offset;
7459                         mddev->bitmap_info.space =
7460                                 mddev->bitmap_info.default_space;
7461                         bitmap = md_bitmap_create(mddev, -1);
7462                         mddev_suspend(mddev);
7463                         if (!IS_ERR(bitmap)) {
7464                                 mddev->bitmap = bitmap;
7465                                 rv = md_bitmap_load(mddev);
7466                         } else
7467                                 rv = PTR_ERR(bitmap);
7468                         if (rv)
7469                                 md_bitmap_destroy(mddev);
7470                         mddev_resume(mddev);
7471                 } else {
7472                         /* remove the bitmap */
7473                         if (!mddev->bitmap) {
7474                                 rv = -ENOENT;
7475                                 goto err;
7476                         }
7477                         if (mddev->bitmap->storage.file) {
7478                                 rv = -EINVAL;
7479                                 goto err;
7480                         }
7481                         if (mddev->bitmap_info.nodes) {
7482                                 /* hold the PW lock on all the bitmaps */
7483                                 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
7484                                         pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
7485                                         rv = -EPERM;
7486                                         md_cluster_ops->unlock_all_bitmaps(mddev);
7487                                         goto err;
7488                                 }
7489
7490                                 mddev->bitmap_info.nodes = 0;
7491                                 md_cluster_ops->leave(mddev);
7492                                 module_put(md_cluster_mod);
7493                                 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
7494                         }
7495                         mddev_suspend(mddev);
7496                         md_bitmap_destroy(mddev);
7497                         mddev_resume(mddev);
7498                         mddev->bitmap_info.offset = 0;
7499                 }
7500         }
7501         md_update_sb(mddev, 1);
7502         return rv;
7503 err:
7504         return rv;
7505 }
7506
7507 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
7508 {
7509         struct md_rdev *rdev;
7510         int err = 0;
7511
7512         if (mddev->pers == NULL)
7513                 return -ENODEV;
7514
7515         rcu_read_lock();
7516         rdev = md_find_rdev_rcu(mddev, dev);
7517         if (!rdev)
7518                 err =  -ENODEV;
7519         else {
7520                 md_error(mddev, rdev);
7521                 if (!test_bit(Faulty, &rdev->flags))
7522                         err = -EBUSY;
7523         }
7524         rcu_read_unlock();
7525         return err;
7526 }
7527
7528 /*
7529  * We have a problem here: there is no easy way to give a CHS
7530  * virtual geometry. We currently pretend that we have 2 heads,
7531  * 4 sectors (with a BIG number of cylinders...). This drives
7532  * dosfs just mad... ;-)
7533  */
7534 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
7535 {
7536         struct mddev *mddev = bdev->bd_disk->private_data;
7537
7538         geo->heads = 2;
7539         geo->sectors = 4;
7540         geo->cylinders = mddev->array_sectors / 8;
7541         return 0;
7542 }
7543
7544 static inline bool md_ioctl_valid(unsigned int cmd)
7545 {
7546         switch (cmd) {
7547         case ADD_NEW_DISK:
7548         case GET_ARRAY_INFO:
7549         case GET_BITMAP_FILE:
7550         case GET_DISK_INFO:
7551         case HOT_ADD_DISK:
7552         case HOT_REMOVE_DISK:
7553         case RAID_VERSION:
7554         case RESTART_ARRAY_RW:
7555         case RUN_ARRAY:
7556         case SET_ARRAY_INFO:
7557         case SET_BITMAP_FILE:
7558         case SET_DISK_FAULTY:
7559         case STOP_ARRAY:
7560         case STOP_ARRAY_RO:
7561         case CLUSTERED_DISK_NACK:
7562                 return true;
7563         default:
7564                 return false;
7565         }
7566 }
7567
7568 static int md_ioctl(struct block_device *bdev, fmode_t mode,
7569                         unsigned int cmd, unsigned long arg)
7570 {
7571         int err = 0;
7572         void __user *argp = (void __user *)arg;
7573         struct mddev *mddev = NULL;
7574
7575         if (!md_ioctl_valid(cmd))
7576                 return -ENOTTY;
7577
7578         switch (cmd) {
7579         case RAID_VERSION:
7580         case GET_ARRAY_INFO:
7581         case GET_DISK_INFO:
7582                 break;
7583         default:
7584                 if (!capable(CAP_SYS_ADMIN))
7585                         return -EACCES;
7586         }
7587
7588         /*
7589          * Commands dealing with the RAID driver but not any
7590          * particular array:
7591          */
7592         switch (cmd) {
7593         case RAID_VERSION:
7594                 err = get_version(argp);
7595                 goto out;
7596         default:;
7597         }
7598
7599         /*
7600          * Commands creating/starting a new array:
7601          */
7602
7603         mddev = bdev->bd_disk->private_data;
7604
7605         if (!mddev) {
7606                 BUG();
7607                 goto out;
7608         }
7609
7610         /* Some actions do not require the mutex */
7611         switch (cmd) {
7612         case GET_ARRAY_INFO:
7613                 if (!mddev->raid_disks && !mddev->external)
7614                         err = -ENODEV;
7615                 else
7616                         err = get_array_info(mddev, argp);
7617                 goto out;
7618
7619         case GET_DISK_INFO:
7620                 if (!mddev->raid_disks && !mddev->external)
7621                         err = -ENODEV;
7622                 else
7623                         err = get_disk_info(mddev, argp);
7624                 goto out;
7625
7626         case SET_DISK_FAULTY:
7627                 err = set_disk_faulty(mddev, new_decode_dev(arg));
7628                 goto out;
7629
7630         case GET_BITMAP_FILE:
7631                 err = get_bitmap_file(mddev, argp);
7632                 goto out;
7633
7634         }
7635
7636         if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK)
7637                 flush_rdev_wq(mddev);
7638
7639         if (cmd == HOT_REMOVE_DISK)
7640                 /* need to ensure recovery thread has run */
7641                 wait_event_interruptible_timeout(mddev->sb_wait,
7642                                                  !test_bit(MD_RECOVERY_NEEDED,
7643                                                            &mddev->recovery),
7644                                                  msecs_to_jiffies(5000));
7645         if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7646                 /* Need to flush page cache, and ensure no-one else opens
7647                  * and writes
7648                  */
7649                 mutex_lock(&mddev->open_mutex);
7650                 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
7651                         mutex_unlock(&mddev->open_mutex);
7652                         err = -EBUSY;
7653                         goto out;
7654                 }
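                /*
                 * MD_CLOSING also makes md_open() fail with -ENODEV, so no
                 * new opener can race in while the array is being stopped.
                 */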
7655                 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
7656                         mutex_unlock(&mddev->open_mutex);
7657                         err = -EBUSY;
7658                         goto out;
7659                 }
7660                 mutex_unlock(&mddev->open_mutex);
7661                 sync_blockdev(bdev);
7662         }
7663         err = mddev_lock(mddev);
7664         if (err) {
7665                 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7666                          err, cmd);
7667                 goto out;
7668         }
7669
7670         if (cmd == SET_ARRAY_INFO) {
7671                 mdu_array_info_t info;
7672                 if (!arg)
7673                         memset(&info, 0, sizeof(info));
7674                 else if (copy_from_user(&info, argp, sizeof(info))) {
7675                         err = -EFAULT;
7676                         goto unlock;
7677                 }
7678                 if (mddev->pers) {
7679                         err = update_array_info(mddev, &info);
7680                         if (err) {
7681                                 pr_warn("md: couldn't update array info. %d\n", err);
7682                                 goto unlock;
7683                         }
7684                         goto unlock;
7685                 }
7686                 if (!list_empty(&mddev->disks)) {
7687                         pr_warn("md: array %s already has disks!\n", mdname(mddev));
7688                         err = -EBUSY;
7689                         goto unlock;
7690                 }
7691                 if (mddev->raid_disks) {
7692                         pr_warn("md: array %s already initialised!\n", mdname(mddev));
7693                         err = -EBUSY;
7694                         goto unlock;
7695                 }
7696                 err = md_set_array_info(mddev, &info);
7697                 if (err) {
7698                         pr_warn("md: couldn't set array info. %d\n", err);
7699                         goto unlock;
7700                 }
7701                 goto unlock;
7702         }
7703
7704         /*
7705          * Commands querying/configuring an existing array:
7706          */
7707         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
7708          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
7709         if ((!mddev->raid_disks && !mddev->external)
7710             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7711             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7712             && cmd != GET_BITMAP_FILE) {
7713                 err = -ENODEV;
7714                 goto unlock;
7715         }
7716
7717         /*
7718          * Commands even a read-only array can execute:
7719          */
7720         switch (cmd) {
7721         case RESTART_ARRAY_RW:
7722                 err = restart_array(mddev);
7723                 goto unlock;
7724
7725         case STOP_ARRAY:
7726                 err = do_md_stop(mddev, 0, bdev);
7727                 goto unlock;
7728
7729         case STOP_ARRAY_RO:
7730                 err = md_set_readonly(mddev, bdev);
7731                 goto unlock;
7732
7733         case HOT_REMOVE_DISK:
7734                 err = hot_remove_disk(mddev, new_decode_dev(arg));
7735                 goto unlock;
7736
7737         case ADD_NEW_DISK:
7738                 /* We can support ADD_NEW_DISK on read-only arrays
7739                  * only if we are re-adding a preexisting device.
7740                  * So require mddev->pers and MD_DISK_SYNC.
7741                  */
7742                 if (mddev->pers) {
7743                         mdu_disk_info_t info;
7744                         if (copy_from_user(&info, argp, sizeof(info)))
7745                                 err = -EFAULT;
7746                         else if (!(info.state & (1<<MD_DISK_SYNC)))
7747                                 /* Need to clear read-only for this */
7748                                 break;
7749                         else
7750                                 err = md_add_new_disk(mddev, &info);
7751                         goto unlock;
7752                 }
7753                 break;
7754         }
7755
7756         /*
7757          * The remaining ioctls are changing the state of the
7758          * superblock, so we do not allow them on read-only arrays.
7759          */
7760         if (mddev->ro && mddev->pers) {
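                /*
                 * ro == 2 is "auto-read-only": quietly make the array
                 * writable so the state-changing ioctl below can proceed.
                 */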
7761                 if (mddev->ro == 2) {
7762                         mddev->ro = 0;
7763                         sysfs_notify_dirent_safe(mddev->sysfs_state);
7764                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7765                         /* mddev_unlock will wake thread */
7766                         /* If a device failed while we were read-only, we
7767                          * need to make sure the metadata is updated now.
7768                          */
7769                         if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
7770                                 mddev_unlock(mddev);
7771                                 wait_event(mddev->sb_wait,
7772                                            !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7773                                            !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7774                                 mddev_lock_nointr(mddev);
7775                         }
7776                 } else {
7777                         err = -EROFS;
7778                         goto unlock;
7779                 }
7780         }
7781
7782         switch (cmd) {
7783         case ADD_NEW_DISK:
7784         {
7785                 mdu_disk_info_t info;
7786                 if (copy_from_user(&info, argp, sizeof(info)))
7787                         err = -EFAULT;
7788                 else
7789                         err = md_add_new_disk(mddev, &info);
7790                 goto unlock;
7791         }
7792
7793         case CLUSTERED_DISK_NACK:
7794                 if (mddev_is_clustered(mddev))
7795                         md_cluster_ops->new_disk_ack(mddev, false);
7796                 else
7797                         err = -EINVAL;
7798                 goto unlock;
7799
7800         case HOT_ADD_DISK:
7801                 err = hot_add_disk(mddev, new_decode_dev(arg));
7802                 goto unlock;
7803
7804         case RUN_ARRAY:
7805                 err = do_md_run(mddev);
7806                 goto unlock;
7807
7808         case SET_BITMAP_FILE:
7809                 err = set_bitmap_file(mddev, (int)arg);
7810                 goto unlock;
7811
7812         default:
7813                 err = -EINVAL;
7814                 goto unlock;
7815         }
7816
7817 unlock:
7818         if (mddev->hold_active == UNTIL_IOCTL &&
7819             err != -EINVAL)
7820                 mddev->hold_active = 0;
7821         mddev_unlock(mddev);
7822 out:
7823         if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY))
7824                 clear_bit(MD_CLOSING, &mddev->flags);
7825         return err;
7826 }
7827 #ifdef CONFIG_COMPAT
7828 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7829                     unsigned int cmd, unsigned long arg)
7830 {
7831         switch (cmd) {
7832         case HOT_REMOVE_DISK:
7833         case HOT_ADD_DISK:
7834         case SET_DISK_FAULTY:
7835         case SET_BITMAP_FILE:
7836                 /* These take an integer arg, do not convert */
7837                 break;
7838         default:
7839                 arg = (unsigned long)compat_ptr(arg);
7840                 break;
7841         }
7842
7843         return md_ioctl(bdev, mode, cmd, arg);
7844 }
7845 #endif /* CONFIG_COMPAT */
7846
7847 static int md_set_read_only(struct block_device *bdev, bool ro)
7848 {
7849         struct mddev *mddev = bdev->bd_disk->private_data;
7850         int err;
7851
7852         err = mddev_lock(mddev);
7853         if (err)
7854                 return err;
7855
7856         if (!mddev->raid_disks && !mddev->external) {
7857                 err = -ENODEV;
7858                 goto out_unlock;
7859         }
7860
7861         /*
7862          * Transitioning to read-auto need only happen for arrays that call
7863          * md_write_start and which are not ready for writes yet.
7864          */
7865         if (!ro && mddev->ro == 1 && mddev->pers) {
7866                 err = restart_array(mddev);
7867                 if (err)
7868                         goto out_unlock;
7869                 mddev->ro = 2;
7870         }
7871
7872 out_unlock:
7873         mddev_unlock(mddev);
7874         return err;
7875 }
7876
7877 static int md_open(struct block_device *bdev, fmode_t mode)
7878 {
7879         /*
7880          * Succeed if we can lock the mddev, which confirms that
7881          * it isn't being stopped right now.
7882          */
7883         struct mddev *mddev = mddev_find(bdev->bd_dev);
7884         int err;
7885
7886         if (!mddev)
7887                 return -ENODEV;
7888
7889         if (mddev->gendisk != bdev->bd_disk) {
7890                 /* we are racing with mddev_put which is discarding this
7891                  * bd_disk.
7892                  */
7893                 mddev_put(mddev);
7894                 /* Wait until bdev->bd_disk is definitely gone */
7895                 if (work_pending(&mddev->del_work))
7896                         flush_workqueue(md_misc_wq);
7897                 return -EBUSY;
7898         }
7899         BUG_ON(mddev != bdev->bd_disk->private_data);
7900
7901         if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
7902                 goto out;
7903
7904         if (test_bit(MD_CLOSING, &mddev->flags)) {
7905                 mutex_unlock(&mddev->open_mutex);
7906                 err = -ENODEV;
7907                 goto out;
7908         }
7909
7910         err = 0;
7911         atomic_inc(&mddev->openers);
7912         mutex_unlock(&mddev->open_mutex);
7913
7914         bdev_check_media_change(bdev);
7915  out:
7916         if (err)
7917                 mddev_put(mddev);
7918         return err;
7919 }
7920
7921 static void md_release(struct gendisk *disk, fmode_t mode)
7922 {
7923         struct mddev *mddev = disk->private_data;
7924
7925         BUG_ON(!mddev);
7926         atomic_dec(&mddev->openers);
7927         mddev_put(mddev);
7928 }
7929
7930 static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
7931 {
7932         struct mddev *mddev = disk->private_data;
7933         unsigned int ret = 0;
7934
7935         if (mddev->changed)
7936                 ret = DISK_EVENT_MEDIA_CHANGE;
7937         mddev->changed = 0;
7938         return ret;
7939 }
7940
7941 const struct block_device_operations md_fops =
7942 {
7943         .owner          = THIS_MODULE,
7944         .submit_bio     = md_submit_bio,
7945         .open           = md_open,
7946         .release        = md_release,
7947         .ioctl          = md_ioctl,
7948 #ifdef CONFIG_COMPAT
7949         .compat_ioctl   = md_compat_ioctl,
7950 #endif
7951         .getgeo         = md_getgeo,
7952         .check_events   = md_check_events,
7953         .set_read_only  = md_set_read_only,
7954 };
7955
7956 static int md_thread(void *arg)
7957 {
7958         struct md_thread *thread = arg;
7959
7960         /*
7961          * md_thread is a 'system-thread', its priority should be very
7962          * high. We avoid resource deadlocks individually in each
7963          * raid personality. (RAID5 does preallocation) We also use RR and
7964          * the very same RT priority as kswapd, thus we will never get
7965          * into a priority inversion deadlock.
7966          *
7967          * we definitely have to have equal or higher priority than
7968          * bdflush, otherwise bdflush will deadlock if there are too
7969          * many dirty RAID5 blocks.
7970          */
7971
7972         allow_signal(SIGKILL);
7973         while (!kthread_should_stop()) {
7974
7975                 /* We need to wait INTERRUPTIBLE so that
7976                  * we don't add to the load-average.
7977                  * That means we need to be sure no signals are
7978                  * pending
7979                  */
7980                 if (signal_pending(current))
7981                         flush_signals(current);
7982
7983                 wait_event_interruptible_timeout
7984                         (thread->wqueue,
7985                          test_bit(THREAD_WAKEUP, &thread->flags)
7986                          || kthread_should_stop() || kthread_should_park(),
7987                          thread->timeout);
7988
7989                 clear_bit(THREAD_WAKEUP, &thread->flags);
7990                 if (kthread_should_park())
7991                         kthread_parkme();
7992                 if (!kthread_should_stop())
7993                         thread->run(thread);
7994         }
7995
7996         return 0;
7997 }
7998
7999 void md_wakeup_thread(struct md_thread *thread)
8000 {
8001         if (thread) {
8002                 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
8003                 set_bit(THREAD_WAKEUP, &thread->flags);
8004                 wake_up(&thread->wqueue);
8005         }
8006 }
8007 EXPORT_SYMBOL(md_wakeup_thread);
8008
8009 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
8010                 struct mddev *mddev, const char *name)
8011 {
8012         struct md_thread *thread;
8013
8014         thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
8015         if (!thread)
8016                 return NULL;
8017
8018         init_waitqueue_head(&thread->wqueue);
8019
8020         thread->run = run;
8021         thread->mddev = mddev;
8022         thread->timeout = MAX_SCHEDULE_TIMEOUT;
8023         thread->tsk = kthread_run(md_thread, thread,
8024                                   "%s_%s",
8025                                   mdname(thread->mddev),
8026                                   name);
8027         if (IS_ERR(thread->tsk)) {
8028                 kfree(thread);
8029                 return NULL;
8030         }
8031         return thread;
8032 }
8033 EXPORT_SYMBOL(md_register_thread);
8034
8035 void md_unregister_thread(struct md_thread **threadp)
8036 {
8037         struct md_thread *thread;
8038
8039         /*
8040          * Locking ensures that mddev_unlock does not wake_up a
8041          * non-existent thread
8042          */
8043         spin_lock(&pers_lock);
8044         thread = *threadp;
8045         if (!thread) {
8046                 spin_unlock(&pers_lock);
8047                 return;
8048         }
8049         *threadp = NULL;
8050         spin_unlock(&pers_lock);
8051
8052         pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
8053         kthread_stop(thread->tsk);
8054         kfree(thread);
8055 }
8056 EXPORT_SYMBOL(md_unregister_thread);
8057
8058 void md_error(struct mddev *mddev, struct md_rdev *rdev)
8059 {
8060         if (!rdev || test_bit(Faulty, &rdev->flags))
8061                 return;
8062
8063         if (!mddev->pers || !mddev->pers->error_handler)
8064                 return;
8065         mddev->pers->error_handler(mddev,rdev);
8066         if (mddev->degraded)
8067                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8068         sysfs_notify_dirent_safe(rdev->sysfs_state);
8069         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8070         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8071         md_wakeup_thread(mddev->thread);
8072         if (mddev->event_work.func)
8073                 queue_work(md_misc_wq, &mddev->event_work);
8074         md_new_event(mddev);
8075 }
8076 EXPORT_SYMBOL(md_error);
8077
8078 /* seq_file implementation /proc/mdstat */
8079
8080 static void status_unused(struct seq_file *seq)
8081 {
8082         int i = 0;
8083         struct md_rdev *rdev;
8084
8085         seq_printf(seq, "unused devices: ");
8086
8087         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
8088                 char b[BDEVNAME_SIZE];
8089                 i++;
8090                 seq_printf(seq, "%s ",
8091                               bdevname(rdev->bdev,b));
8092         }
8093         if (!i)
8094                 seq_printf(seq, "<none>");
8095
8096         seq_printf(seq, "\n");
8097 }
8098
8099 static int status_resync(struct seq_file *seq, struct mddev *mddev)
8100 {
8101         sector_t max_sectors, resync, res;
8102         unsigned long dt, db = 0;
8103         sector_t rt, curr_mark_cnt, resync_mark_cnt;
8104         int scale, recovery_active;
8105         unsigned int per_milli;
8106
8107         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8108             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8109                 max_sectors = mddev->resync_max_sectors;
8110         else
8111                 max_sectors = mddev->dev_sectors;
8112
8113         resync = mddev->curr_resync;
8114         if (resync <= 3) {
8115                 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
8116                         /* Still cleaning up */
8117                         resync = max_sectors;
8118         } else if (resync > max_sectors)
8119                 resync = max_sectors;
8120         else
8121                 resync -= atomic_read(&mddev->recovery_active);
8122
8123         if (resync == 0) {
8124                 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
8125                         struct md_rdev *rdev;
8126
8127                         rdev_for_each(rdev, mddev)
8128                                 if (rdev->raid_disk >= 0 &&
8129                                     !test_bit(Faulty, &rdev->flags) &&
8130                                     rdev->recovery_offset != MaxSector &&
8131                                     rdev->recovery_offset) {
8132                                         seq_printf(seq, "\trecover=REMOTE");
8133                                         return 1;
8134                                 }
8135                         if (mddev->reshape_position != MaxSector)
8136                                 seq_printf(seq, "\treshape=REMOTE");
8137                         else
8138                                 seq_printf(seq, "\tresync=REMOTE");
8139                         return 1;
8140                 }
8141                 if (mddev->recovery_cp < MaxSector) {
8142                         seq_printf(seq, "\tresync=PENDING");
8143                         return 1;
8144                 }
8145                 return 0;
8146         }
8147         if (resync < 3) {
8148                 seq_printf(seq, "\tresync=DELAYED");
8149                 return 1;
8150         }
8151
8152         WARN_ON(max_sectors == 0);
8153         /* Pick 'scale' such that (resync>>scale)*1000 will fit
8154          * in a sector_t, and (max_sectors>>scale) will fit in a
8155          * u32, as those are the requirements for sector_div.
8156          * Thus 'scale' must be at least 10
8157          */
8158         scale = 10;
8159         if (sizeof(sector_t) > sizeof(unsigned long)) {
8160                 while ( max_sectors/2 > (1ULL<<(scale+32)))
8161                         scale++;
8162         }
8163         res = (resync>>scale)*1000;
8164         sector_div(res, (u32)((max_sectors>>scale)+1));
8165
8166         per_milli = res;
8167         {
8168                 int i, x = per_milli/50, y = 20-x;
8169                 seq_printf(seq, "[");
8170                 for (i = 0; i < x; i++)
8171                         seq_printf(seq, "=");
8172                 seq_printf(seq, ">");
8173                 for (i = 0; i < y; i++)
8174                         seq_printf(seq, ".");
8175                 seq_printf(seq, "] ");
8176         }
8177         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
8178                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
8179                     "reshape" :
8180                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
8181                      "check" :
8182                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
8183                       "resync" : "recovery"))),
8184                    per_milli/10, per_milli % 10,
8185                    (unsigned long long) resync/2,
8186                    (unsigned long long) max_sectors/2);
8187
8188         /*
8189          * dt: time from mark until now
8190          * db: blocks written from mark until now
8191          * rt: remaining time
8192          *
8193          * rt is a sector_t, which is always 64bit now. We are keeping
8194          * the original algorithm, but it is not really necessary.
8195          *
8196          * Original algorithm:
8197          *   So we divide before multiply in case it is 32bit and close
8198          *   to the limit.
8199          *   We scale the divisor (db) by 32 to avoid losing precision
8200          *   near the end of resync when the number of remaining sectors
8201          *   is close to 'db'.
8202          *   We then divide rt by 32 after multiplying by db to compensate.
8203          *   The '+1' avoids division by zero if db is very small.
8204          */
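        /*
         * Illustrative numbers (not taken from the source): with dt = 10s
         * and db = 1,000,000 sectors completed in that window, a remaining
         * 10,000,000 sectors gives rt of roughly 99 seconds, printed as
         * "finish=1.6min" with "speed=50000K/sec".
         */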
8205         dt = ((jiffies - mddev->resync_mark) / HZ);
8206         if (!dt) dt++;
8207
8208         curr_mark_cnt = mddev->curr_mark_cnt;
8209         recovery_active = atomic_read(&mddev->recovery_active);
8210         resync_mark_cnt = mddev->resync_mark_cnt;
8211
8212         if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
8213                 db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
8214
8215         rt = max_sectors - resync;    /* number of remaining sectors */
8216         rt = div64_u64(rt, db/32+1);
8217         rt *= dt;
8218         rt >>= 5;
8219
8220         seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
8221                    ((unsigned long)rt % 60)/6);
8222
8223         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
8224         return 1;
8225 }
8226
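/*
 * /proc/mdstat iterator: position 0 is a synthetic "header" entry
 * ((void *)1, the Personalities line), positions 1..n walk all_mddevs
 * (each mddev is returned with a reference held), and a synthetic
 * "tail" entry ((void *)2, the unused-devices line) is reported at the
 * sentinel position 0x10000.
 */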
8227 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
8228 {
8229         struct list_head *tmp;
8230         loff_t l = *pos;
8231         struct mddev *mddev;
8232
8233         if (l == 0x10000) {
8234                 ++*pos;
8235                 return (void *)2;
8236         }
8237         if (l > 0x10000)
8238                 return NULL;
8239         if (!l--)
8240                 /* header */
8241                 return (void*)1;
8242
8243         spin_lock(&all_mddevs_lock);
8244         list_for_each(tmp,&all_mddevs)
8245                 if (!l--) {
8246                         mddev = list_entry(tmp, struct mddev, all_mddevs);
8247                         mddev_get(mddev);
8248                         spin_unlock(&all_mddevs_lock);
8249                         return mddev;
8250                 }
8251         spin_unlock(&all_mddevs_lock);
8252         if (!l--)
8253                 return (void*)2;/* tail */
8254         return NULL;
8255 }
8256
8257 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8258 {
8259         struct list_head *tmp;
8260         struct mddev *next_mddev, *mddev = v;
8261
8262         ++*pos;
8263         if (v == (void*)2)
8264                 return NULL;
8265
8266         spin_lock(&all_mddevs_lock);
8267         if (v == (void*)1)
8268                 tmp = all_mddevs.next;
8269         else
8270                 tmp = mddev->all_mddevs.next;
8271         if (tmp != &all_mddevs)
8272                 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
8273         else {
8274                 next_mddev = (void*)2;
8275                 *pos = 0x10000;
8276         }
8277         spin_unlock(&all_mddevs_lock);
8278
8279         if (v != (void*)1)
8280                 mddev_put(mddev);
8281         return next_mddev;
8282
8283 }
8284
8285 static void md_seq_stop(struct seq_file *seq, void *v)
8286 {
8287         struct mddev *mddev = v;
8288
8289         if (mddev && v != (void*)1 && v != (void*)2)
8290                 mddev_put(mddev);
8291 }
8292
8293 static int md_seq_show(struct seq_file *seq, void *v)
8294 {
8295         struct mddev *mddev = v;
8296         sector_t sectors;
8297         struct md_rdev *rdev;
8298
8299         if (v == (void*)1) {
8300                 struct md_personality *pers;
8301                 seq_printf(seq, "Personalities : ");
8302                 spin_lock(&pers_lock);
8303                 list_for_each_entry(pers, &pers_list, list)
8304                         seq_printf(seq, "[%s] ", pers->name);
8305
8306                 spin_unlock(&pers_lock);
8307                 seq_printf(seq, "\n");
8308                 seq->poll_event = atomic_read(&md_event_count);
8309                 return 0;
8310         }
8311         if (v == (void*)2) {
8312                 status_unused(seq);
8313                 return 0;
8314         }
8315
8316         spin_lock(&mddev->lock);
8317         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
8318                 seq_printf(seq, "%s : %sactive", mdname(mddev),
8319                                                 mddev->pers ? "" : "in");
8320                 if (mddev->pers) {
8321                         if (mddev->ro==1)
8322                                 seq_printf(seq, " (read-only)");
8323                         if (mddev->ro==2)
8324                                 seq_printf(seq, " (auto-read-only)");
8325                         seq_printf(seq, " %s", mddev->pers->name);
8326                 }
8327
8328                 sectors = 0;
8329                 rcu_read_lock();
8330                 rdev_for_each_rcu(rdev, mddev) {
8331                         char b[BDEVNAME_SIZE];
8332                         seq_printf(seq, " %s[%d]",
8333                                 bdevname(rdev->bdev,b), rdev->desc_nr);
8334                         if (test_bit(WriteMostly, &rdev->flags))
8335                                 seq_printf(seq, "(W)");
8336                         if (test_bit(Journal, &rdev->flags))
8337                                 seq_printf(seq, "(J)");
8338                         if (test_bit(Faulty, &rdev->flags)) {
8339                                 seq_printf(seq, "(F)");
8340                                 continue;
8341                         }
8342                         if (rdev->raid_disk < 0)
8343                                 seq_printf(seq, "(S)"); /* spare */
8344                         if (test_bit(Replacement, &rdev->flags))
8345                                 seq_printf(seq, "(R)");
8346                         sectors += rdev->sectors;
8347                 }
8348                 rcu_read_unlock();
8349
8350                 if (!list_empty(&mddev->disks)) {
8351                         if (mddev->pers)
8352                                 seq_printf(seq, "\n      %llu blocks",
8353                                            (unsigned long long)
8354                                            mddev->array_sectors / 2);
8355                         else
8356                                 seq_printf(seq, "\n      %llu blocks",
8357                                            (unsigned long long)sectors / 2);
8358                 }
8359                 if (mddev->persistent) {
8360                         if (mddev->major_version != 0 ||
8361                             mddev->minor_version != 90) {
8362                                 seq_printf(seq," super %d.%d",
8363                                            mddev->major_version,
8364                                            mddev->minor_version);
8365                         }
8366                 } else if (mddev->external)
8367                         seq_printf(seq, " super external:%s",
8368                                    mddev->metadata_type);
8369                 else
8370                         seq_printf(seq, " super non-persistent");
8371
8372                 if (mddev->pers) {
8373                         mddev->pers->status(seq, mddev);
8374                         seq_printf(seq, "\n      ");
8375                         if (mddev->pers->sync_request) {
8376                                 if (status_resync(seq, mddev))
8377                                         seq_printf(seq, "\n      ");
8378                         }
8379                 } else
8380                         seq_printf(seq, "\n       ");
8381
8382                 md_bitmap_status(seq, mddev->bitmap);
8383
8384                 seq_printf(seq, "\n");
8385         }
8386         spin_unlock(&mddev->lock);
8387
8388         return 0;
8389 }
8390
8391 static const struct seq_operations md_seq_ops = {
8392         .start  = md_seq_start,
8393         .next   = md_seq_next,
8394         .stop   = md_seq_stop,
8395         .show   = md_seq_show,
8396 };
8397
8398 static int md_seq_open(struct inode *inode, struct file *file)
8399 {
8400         struct seq_file *seq;
8401         int error;
8402
8403         error = seq_open(file, &md_seq_ops);
8404         if (error)
8405                 return error;
8406
8407         seq = file->private_data;
8408         seq->poll_event = atomic_read(&md_event_count);
8409         return error;
8410 }
8411
8412 static int md_unloading;
8413 static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
8414 {
8415         struct seq_file *seq = filp->private_data;
8416         __poll_t mask;
8417
8418         if (md_unloading)
8419                 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
8420         poll_wait(filp, &md_event_waiters, wait);
8421
8422         /* always allow read */
8423         mask = EPOLLIN | EPOLLRDNORM;
8424
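        /*
         * seq->poll_event snapshots md_event_count when /proc/mdstat was
         * opened (and again when the header is shown); a newer count means
         * the array state changed since then, so signal an exceptional
         * condition to pollers.
         */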
8425         if (seq->poll_event != atomic_read(&md_event_count))
8426                 mask |= EPOLLERR | EPOLLPRI;
8427         return mask;
8428 }
8429
8430 static const struct proc_ops mdstat_proc_ops = {
8431         .proc_open      = md_seq_open,
8432         .proc_read      = seq_read,
8433         .proc_lseek     = seq_lseek,
8434         .proc_release   = seq_release,
8435         .proc_poll      = mdstat_poll,
8436 };
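/*
 * Userspace (mdadm --monitor and similar tools) typically consumes the
 * poll support above by waiting for POLLPRI on /proc/mdstat and then
 * re-reading (or re-opening) the file to pick up the new state.  A
 * minimal sketch, with headers and error handling omitted:
 *
 *	char buf[4096];
 *	struct pollfd pfd;
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *
 *	pfd.fd = fd;
 *	pfd.events = POLLPRI | POLLERR;
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		lseek(fd, 0, SEEK_SET);
 *		read(fd, buf, sizeof(buf));
 *	}
 */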
8437
8438 int register_md_personality(struct md_personality *p)
8439 {
8440         pr_debug("md: %s personality registered for level %d\n",
8441                  p->name, p->level);
8442         spin_lock(&pers_lock);
8443         list_add_tail(&p->list, &pers_list);
8444         spin_unlock(&pers_lock);
8445         return 0;
8446 }
8447 EXPORT_SYMBOL(register_md_personality);
8448
8449 int unregister_md_personality(struct md_personality *p)
8450 {
8451         pr_debug("md: %s personality unregistered\n", p->name);
8452         spin_lock(&pers_lock);
8453         list_del_init(&p->list);
8454         spin_unlock(&pers_lock);
8455         return 0;
8456 }
8457 EXPORT_SYMBOL(unregister_md_personality);
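/*
 * A RAID level driver wires itself up through the two registration calls
 * above from its module init/exit paths.  A trimmed-down sketch (callback
 * list abbreviated, the "example" names are placeholders):
 *
 *	static struct md_personality example_personality = {
 *		.name		= "example",
 *		.level		= 1,
 *		.owner		= THIS_MODULE,
 *		.make_request	= example_make_request,
 *		.run		= example_run,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(&example_personality);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_md_personality(&example_personality);
 *	}
 */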
8458
8459 int register_md_cluster_operations(struct md_cluster_operations *ops,
8460                                    struct module *module)
8461 {
8462         int ret = 0;
8463         spin_lock(&pers_lock);
8464         if (md_cluster_ops != NULL)
8465                 ret = -EALREADY;
8466         else {
8467                 md_cluster_ops = ops;
8468                 md_cluster_mod = module;
8469         }
8470         spin_unlock(&pers_lock);
8471         return ret;
8472 }
8473 EXPORT_SYMBOL(register_md_cluster_operations);
8474
8475 int unregister_md_cluster_operations(void)
8476 {
8477         spin_lock(&pers_lock);
8478         md_cluster_ops = NULL;
8479         spin_unlock(&pers_lock);
8480         return 0;
8481 }
8482 EXPORT_SYMBOL(unregister_md_cluster_operations);
8483
8484 int md_setup_cluster(struct mddev *mddev, int nodes)
8485 {
8486         int ret;
8487         if (!md_cluster_ops)
8488                 request_module("md-cluster");
8489         spin_lock(&pers_lock);
8490         /* ensure module won't be unloaded */
8491         if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
8492                 pr_warn("can't find md-cluster module or get its reference.\n");
8493                 spin_unlock(&pers_lock);
8494                 return -ENOENT;
8495         }
8496         spin_unlock(&pers_lock);
8497
8498         ret = md_cluster_ops->join(mddev, nodes);
8499         if (!ret)
8500                 mddev->safemode_delay = 0;
8501         return ret;
8502 }
8503
8504 void md_cluster_stop(struct mddev *mddev)
8505 {
8506         if (!md_cluster_ops)
8507                 return;
8508         md_cluster_ops->leave(mddev);
8509         module_put(md_cluster_mod);
8510 }
8511
8512 static int is_mddev_idle(struct mddev *mddev, int init)
8513 {
8514         struct md_rdev *rdev;
8515         int idle;
8516         int curr_events;
8517
8518         idle = 1;
8519         rcu_read_lock();
8520         rdev_for_each_rcu(rdev, mddev) {
8521                 struct gendisk *disk = rdev->bdev->bd_disk;
8522                 curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
8523                               atomic_read(&disk->sync_io);
8524                 /* sync IO will cause sync_io to increase before the disk_stats
8525                  * as sync_io is counted when a request starts, and
8526                  * disk_stats is counted when it completes.
8527                  * So resync activity will cause curr_events to be smaller than
8528                  * when there was no such activity.
8529                  * non-sync IO will cause disk_stat to increase without
8530                  * increasing sync_io so curr_events will (eventually)
8531                  * be larger than it was before.  Once it becomes
8532                  * substantially larger, the test below will cause
8533                  * the array to appear non-idle, and resync will slow
8534                  * down.
8535                  * If there is a lot of outstanding resync activity when
8536                  * we set last_events to curr_events, then all that activity
8537                  * completing might cause the array to appear non-idle
8538                  * and resync will be slowed down even though there might
8539                  * not have been non-resync activity.  This will only
8540                  * happen once though.  'last_events' will soon reflect
8541                  * the state where there is little or no outstanding
8542                  * resync requests, and further resync activity will
8543                  * always make curr_events less than last_events.
8544                  *
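                 * Note the "> 64" threshold below: just over 64 sectors
                 * (32KiB) of non-resync IO beyond the recorded
                 * 'last_events' is enough to report the array non-idle,
                 * after which md_do_sync() backs off as long as it is
                 * running faster than speed_min.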
8545                  */
8546                 if (init || curr_events - rdev->last_events > 64) {
8547                         rdev->last_events = curr_events;
8548                         idle = 0;
8549                 }
8550         }
8551         rcu_read_unlock();
8552         return idle;
8553 }
8554
8555 void md_done_sync(struct mddev *mddev, int blocks, int ok)
8556 {
8557         /* another "blocks" (512-byte) blocks have been synced */
8558         atomic_sub(blocks, &mddev->recovery_active);
8559         wake_up(&mddev->recovery_wait);
8560         if (!ok) {
8561                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8562                 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
8563                 md_wakeup_thread(mddev->thread);
8564                 // stop recovery, signal do_sync ....
8565         }
8566 }
8567 EXPORT_SYMBOL(md_done_sync);
8568
8569 /* md_write_start(mddev, bi)
8570  * If we need to update some array metadata (e.g. 'active' flag
8571  * in superblock) before writing, schedule a superblock update
8572  * and wait for it to complete.
8573  * A return value of 'false' means that the write wasn't recorded
8574  * and cannot proceed as the array is being suspended.
8575  */
8576 bool md_write_start(struct mddev *mddev, struct bio *bi)
8577 {
8578         int did_change = 0;
8579
8580         if (bio_data_dir(bi) != WRITE)
8581                 return true;
8582
8583         BUG_ON(mddev->ro == 1);
8584         if (mddev->ro == 2) {
8585                 /* need to switch to read/write */
8586                 mddev->ro = 0;
8587                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8588                 md_wakeup_thread(mddev->thread);
8589                 md_wakeup_thread(mddev->sync_thread);
8590                 did_change = 1;
8591         }
8592         rcu_read_lock();
8593         percpu_ref_get(&mddev->writes_pending);
8594         smp_mb(); /* Match smp_mb in set_in_sync() */
8595         if (mddev->safemode == 1)
8596                 mddev->safemode = 0;
8597         /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
8598         if (mddev->in_sync || mddev->sync_checkers) {
8599                 spin_lock(&mddev->lock);
8600                 if (mddev->in_sync) {
8601                         mddev->in_sync = 0;
8602                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8603                         set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8604                         md_wakeup_thread(mddev->thread);
8605                         did_change = 1;
8606                 }
8607                 spin_unlock(&mddev->lock);
8608         }
8609         rcu_read_unlock();
8610         if (did_change)
8611                 sysfs_notify_dirent_safe(mddev->sysfs_state);
8612         if (!mddev->has_superblocks)
8613                 return true;
8614         wait_event(mddev->sb_wait,
8615                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
8616                    mddev->suspended);
8617         if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
8618                 percpu_ref_put(&mddev->writes_pending);
8619                 return false;
8620         }
8621         return true;
8622 }
8623 EXPORT_SYMBOL(md_write_start);
8624
8625 /* md_write_inc can only be called when md_write_start() has
8626  * already been called at least once for the current request.
8627  * It increments the counter and is useful when a single request
8628  * is split into several parts.  Each part causes an increment and
8629  * so needs a matching md_write_end().
8630  * Unlike md_write_start(), it is safe to call md_write_inc() inside
8631  * a spinlocked region.
8632  */
8633 void md_write_inc(struct mddev *mddev, struct bio *bi)
8634 {
8635         if (bio_data_dir(bi) != WRITE)
8636                 return;
8637         WARN_ON_ONCE(mddev->in_sync || mddev->ro);
8638         percpu_ref_get(&mddev->writes_pending);
8639 }
8640 EXPORT_SYMBOL(md_write_inc);
8641
8642 void md_write_end(struct mddev *mddev)
8643 {
8644         percpu_ref_put(&mddev->writes_pending);
8645
8646         if (mddev->safemode == 2)
8647                 md_wakeup_thread(mddev->thread);
8648         else if (mddev->safemode_delay)
8649                 /* The roundup() ensures this only performs locking once
8650                  * every ->safemode_delay jiffies
8651                  */
8652                 mod_timer(&mddev->safemode_timer,
8653                           roundup(jiffies, mddev->safemode_delay) +
8654                           mddev->safemode_delay);
8655 }
8656
8657 EXPORT_SYMBOL(md_write_end);
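/*
 * Putting md_write_start()/md_write_inc()/md_write_end() together, a
 * personality's write path brackets each write roughly as follows (a
 * sketch only; real personalities typically defer the md_write_end()
 * call to the bio completion handler):
 *
 *	if (!md_write_start(mddev, bio))
 *		return false;	(array is being suspended, write not recorded)
 *
 *	... clone and submit the member-device bios; if the original bio
 *	... is split, call md_write_inc() for each additional part
 *
 *	md_write_end(mddev);	(one call per md_write_start()/md_write_inc())
 */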
8658
8659 /* md_allow_write(mddev)
8660  * Calling this ensures that the array is marked 'active' so that writes
8661  * may proceed without blocking.  It is important to call this before
8662  * attempting a GFP_KERNEL allocation while holding the mddev lock.
8663  * Must be called with mddev_lock held.
8664  */
8665 void md_allow_write(struct mddev *mddev)
8666 {
8667         if (!mddev->pers)
8668                 return;
8669         if (mddev->ro)
8670                 return;
8671         if (!mddev->pers->sync_request)
8672                 return;
8673
8674         spin_lock(&mddev->lock);
8675         if (mddev->in_sync) {
8676                 mddev->in_sync = 0;
8677                 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8678                 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8679                 if (mddev->safemode_delay &&
8680                     mddev->safemode == 0)
8681                         mddev->safemode = 1;
8682                 spin_unlock(&mddev->lock);
8683                 md_update_sb(mddev, 0);
8684                 sysfs_notify_dirent_safe(mddev->sysfs_state);
8685                 /* wait for the dirty state to be recorded in the metadata */
8686                 wait_event(mddev->sb_wait,
8687                            !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
8688         } else
8689                 spin_unlock(&mddev->lock);
8690 }
8691 EXPORT_SYMBOL_GPL(md_allow_write);
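/*
 * A typical md_allow_write() caller (sketch; 'new_conf' is just a
 * placeholder): a personality that must do a GFP_KERNEL allocation while
 * holding the mddev lock first marks the array active, so that any
 * writeback the allocation triggers does not block behind a 'clean ->
 * active' metadata transition that would itself need this thread:
 *
 *	md_allow_write(mddev);
 *
 *	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
 *	if (!new_conf)
 *		return -ENOMEM;
 */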
8692
8693 #define SYNC_MARKS      10
8694 #define SYNC_MARK_STEP  (3*HZ)
8695 #define UPDATE_FREQUENCY (5*60*HZ)
8696 void md_do_sync(struct md_thread *thread)
8697 {
8698         struct mddev *mddev = thread->mddev;
8699         struct mddev *mddev2;
8700         unsigned int currspeed = 0, window;
8701         sector_t max_sectors,j, io_sectors, recovery_done;
8702         unsigned long mark[SYNC_MARKS];
8703         unsigned long update_time;
8704         sector_t mark_cnt[SYNC_MARKS];
8705         int last_mark,m;
8706         struct list_head *tmp;
8707         sector_t last_check;
8708         int skipped = 0;
8709         struct md_rdev *rdev;
8710         char *desc, *action = NULL;
8711         struct blk_plug plug;
8712         int ret;
8713
8714         /* just in case the thread restarts... */
8715         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8716             test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
8717                 return;
8718         if (mddev->ro) {/* never try to sync a read-only array */
8719                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8720                 return;
8721         }
8722
8723         if (mddev_is_clustered(mddev)) {
8724                 ret = md_cluster_ops->resync_start(mddev);
8725                 if (ret)
8726                         goto skip;
8727
8728                 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
8729                 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8730                         test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
8731                         test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
8732                      && ((unsigned long long)mddev->curr_resync_completed
8733                          < (unsigned long long)mddev->resync_max_sectors))
8734                         goto skip;
8735         }
8736
8737         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8738                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
8739                         desc = "data-check";
8740                         action = "check";
8741                 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8742                         desc = "requested-resync";
8743                         action = "repair";
8744                 } else
8745                         desc = "resync";
8746         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8747                 desc = "reshape";
8748         else
8749                 desc = "recovery";
8750
8751         mddev->last_sync_action = action ?: desc;
8752
8753         /* we overload curr_resync somewhat here.
8754          * 0 == not engaged in resync at all
8755          * 2 == checking that there is no conflict with another sync
8756          * 1 == like 2, but have yielded to allow conflicting resync to
8757          *              commence
8758          * other == active in resync - this many blocks
8759          *
8760          * Before starting a resync we must have set curr_resync to
8761          * 2, and then checked that every "conflicting" array has curr_resync
8762          * less than ours.  When we find one that is the same or higher
8763          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
8764  * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
8765          * This will mean we have to start checking from the beginning again.
8766          *
8767          */
8768
8769         do {
8770                 int mddev2_minor = -1;
8771                 mddev->curr_resync = 2;
8772
8773         try_again:
8774                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8775                         goto skip;
8776                 for_each_mddev(mddev2, tmp) {
8777                         if (mddev2 == mddev)
8778                                 continue;
8779                         if (!mddev->parallel_resync
8780                         &&  mddev2->curr_resync
8781                         &&  match_mddev_units(mddev, mddev2)) {
8782                                 DEFINE_WAIT(wq);
8783                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
8784                                         /* arbitrarily yield */
8785                                         mddev->curr_resync = 1;
8786                                         wake_up(&resync_wait);
8787                                 }
8788                                 if (mddev > mddev2 && mddev->curr_resync == 1)
8789                                         /* no need to wait here, we can wait the next
8790                                          * time 'round when curr_resync == 2
8791                                          */
8792                                         continue;
8793                                 /* We need to wait 'interruptible' so as not to
8794                                  * contribute to the load average, and not to
8795                                  * be caught by 'softlockup'
8796                                  */
8797                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
8798                                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8799                                     mddev2->curr_resync >= mddev->curr_resync) {
8800                                         if (mddev2_minor != mddev2->md_minor) {
8801                                                 mddev2_minor = mddev2->md_minor;
8802                                                 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
8803                                                         desc, mdname(mddev),
8804                                                         mdname(mddev2));
8805                                         }
8806                                         mddev_put(mddev2);
8807                                         if (signal_pending(current))
8808                                                 flush_signals(current);
8809                                         schedule();
8810                                         finish_wait(&resync_wait, &wq);
8811                                         goto try_again;
8812                                 }
8813                                 finish_wait(&resync_wait, &wq);
8814                         }
8815                 }
8816         } while (mddev->curr_resync < 2);
8817
8818         j = 0;
8819         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8820                 /* resync follows the size requested by the personality,
8821                  * which defaults to physical size, but can be virtual size
8822                  */
8823                 max_sectors = mddev->resync_max_sectors;
8824                 atomic64_set(&mddev->resync_mismatches, 0);
8825                 /* we don't use the checkpoint if there's a bitmap */
8826                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8827                         j = mddev->resync_min;
8828                 else if (!mddev->bitmap)
8829                         j = mddev->recovery_cp;
8830
8831         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
8832                 max_sectors = mddev->resync_max_sectors;
8833                 /*
8834                  * If the original node aborts reshaping then we continue the
8835                  * reshaping, so set j again to avoid restarting the reshape
8836                  * from the very beginning
8837                  */
8838                 if (mddev_is_clustered(mddev) &&
8839                     mddev->reshape_position != MaxSector)
8840                         j = mddev->reshape_position;
8841         } else {
8842                 /* recovery follows the physical size of devices */
8843                 max_sectors = mddev->dev_sectors;
8844                 j = MaxSector;
8845                 rcu_read_lock();
8846                 rdev_for_each_rcu(rdev, mddev)
8847                         if (rdev->raid_disk >= 0 &&
8848                             !test_bit(Journal, &rdev->flags) &&
8849                             !test_bit(Faulty, &rdev->flags) &&
8850                             !test_bit(In_sync, &rdev->flags) &&
8851                             rdev->recovery_offset < j)
8852                                 j = rdev->recovery_offset;
8853                 rcu_read_unlock();
8854
8855                 /* If there is a bitmap, we need to make sure all
8856                  * writes that started before we added a spare
8857                  * complete before we start doing a recovery.
8858                  * Otherwise the write might complete and (via
8859                  * bitmap_endwrite) set a bit in the bitmap after the
8860                  * recovery has checked that bit and skipped that
8861                  * region.
8862                  */
8863                 if (mddev->bitmap) {
8864                         mddev->pers->quiesce(mddev, 1);
8865                         mddev->pers->quiesce(mddev, 0);
8866                 }
8867         }
8868
8869         pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8870         pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
8871         pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8872                  speed_max(mddev), desc);
8873
8874         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
8875
8876         io_sectors = 0;
8877         for (m = 0; m < SYNC_MARKS; m++) {
8878                 mark[m] = jiffies;
8879                 mark_cnt[m] = io_sectors;
8880         }
8881         last_mark = 0;
8882         mddev->resync_mark = mark[last_mark];
8883         mddev->resync_mark_cnt = mark_cnt[last_mark];
8884
8885         /*
8886          * Tune reconstruction:
8887          */
8888         window = 32 * (PAGE_SIZE / 512);
8889         pr_debug("md: using %dk window, over a total of %lluk.\n",
8890                  window/2, (unsigned long long)max_sectors/2);
8891
8892         atomic_set(&mddev->recovery_active, 0);
8893         last_check = 0;
8894
8895         if (j>2) {
8896                 pr_debug("md: resuming %s of %s from checkpoint.\n",
8897                          desc, mdname(mddev));
8898                 mddev->curr_resync = j;
8899         } else
8900                 mddev->curr_resync = 3; /* no longer delayed */
8901         mddev->curr_resync_completed = j;
8902         sysfs_notify_dirent_safe(mddev->sysfs_completed);
8903         md_new_event(mddev);
8904         update_time = jiffies;
8905
8906         blk_start_plug(&plug);
8907         while (j < max_sectors) {
8908                 sector_t sectors;
8909
8910                 skipped = 0;
8911
8912                 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8913                     ((mddev->curr_resync > mddev->curr_resync_completed &&
8914                       (mddev->curr_resync - mddev->curr_resync_completed)
8915                       > (max_sectors >> 4)) ||
8916                      time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
8917                      (j - mddev->curr_resync_completed)*2
8918                      >= mddev->resync_max - mddev->curr_resync_completed ||
8919                      mddev->curr_resync_completed > mddev->resync_max
8920                             )) {
8921                         /* time to update curr_resync_completed */
8922                         wait_event(mddev->recovery_wait,
8923                                    atomic_read(&mddev->recovery_active) == 0);
8924                         mddev->curr_resync_completed = j;
8925                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8926                             j > mddev->recovery_cp)
8927                                 mddev->recovery_cp = j;
8928                         update_time = jiffies;
8929                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8930                         sysfs_notify_dirent_safe(mddev->sysfs_completed);
8931                 }
8932
8933                 while (j >= mddev->resync_max &&
8934                        !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8935                         /* As this condition is controlled by user-space,
8936                          * we can block indefinitely, so use '_interruptible'
8937                          * to avoid triggering warnings.
8938                          */
8939                         flush_signals(current); /* just in case */
8940                         wait_event_interruptible(mddev->recovery_wait,
8941                                                  mddev->resync_max > j
8942                                                  || test_bit(MD_RECOVERY_INTR,
8943                                                              &mddev->recovery));
8944                 }
8945
8946                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8947                         break;
8948
8949                 sectors = mddev->pers->sync_request(mddev, j, &skipped);
8950                 if (sectors == 0) {
8951                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8952                         break;
8953                 }
8954
8955                 if (!skipped) { /* actual IO requested */
8956                         io_sectors += sectors;
8957                         atomic_add(sectors, &mddev->recovery_active);
8958                 }
8959
8960                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8961                         break;
8962
8963                 j += sectors;
8964                 if (j > max_sectors)
8965                         /* when skipping, extra large numbers can be returned. */
8966                         j = max_sectors;
8967                 if (j > 2)
8968                         mddev->curr_resync = j;
8969                 mddev->curr_mark_cnt = io_sectors;
8970                 if (last_check == 0)
8971                         /* this is the earliest that rebuild will be
8972                          * visible in /proc/mdstat
8973                          */
8974                         md_new_event(mddev);
8975
8976                 if (last_check + window > io_sectors || j == max_sectors)
8977                         continue;
8978
8979                 last_check = io_sectors;
8980         repeat:
8981                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8982                         /* step marks */
8983                         int next = (last_mark+1) % SYNC_MARKS;
8984
8985                         mddev->resync_mark = mark[next];
8986                         mddev->resync_mark_cnt = mark_cnt[next];
8987                         mark[next] = jiffies;
8988                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
8989                         last_mark = next;
8990                 }
8991
8992                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8993                         break;
8994
8995                 /*
8996                  * this loop only exits when either we are slower than
8997                  * the 'hard' speed limit, or the system was IO-idle for
8998                  * a jiffy.
8999                  * the system might be non-idle CPU-wise, but we only care
9000                  * about not overloading the IO subsystem. (things like an
9001                  * e2fsck being done on the RAID array should execute fast)
9002                  */
9003                 cond_resched();
9004
9005                 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
9006                 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
9007                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
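                /*
                 * currspeed ends up in KB/sec, the same unit as
                 * speed_min()/speed_max(): sectors completed since the last
                 * mark are halved to get KiB and divided by the elapsed
                 * seconds; the two "+1"s simply avoid a division by zero and
                 * a reported speed of zero.
                 */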
9008
9009                 if (currspeed > speed_min(mddev)) {
9010                         if (currspeed > speed_max(mddev)) {
9011                                 msleep(500);
9012                                 goto repeat;
9013                         }
9014                         if (!is_mddev_idle(mddev, 0)) {
9015                                 /*
9016                                  * Give other IO more of a chance.
9017                                  * The faster the devices, the less we wait.
9018                                  */
9019                                 wait_event(mddev->recovery_wait,
9020                                            !atomic_read(&mddev->recovery_active));
9021                         }
9022                 }
9023         }
9024         pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
9025                 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
9026                 ? "interrupted" : "done");
9027         /*
9028          * this also signals 'finished resyncing' to md_stop
9029          */
9030         blk_finish_plug(&plug);
9031         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
9032
9033         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9034             !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9035             mddev->curr_resync > 3) {
9036                 mddev->curr_resync_completed = mddev->curr_resync;
9037                 sysfs_notify_dirent_safe(mddev->sysfs_completed);
9038         }
9039         mddev->pers->sync_request(mddev, max_sectors, &skipped);
9040
9041         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
9042             mddev->curr_resync > 3) {
9043                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
9044                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9045                                 if (mddev->curr_resync >= mddev->recovery_cp) {
9046                                         pr_debug("md: checkpointing %s of %s.\n",
9047                                                  desc, mdname(mddev));
9048                                         if (test_bit(MD_RECOVERY_ERROR,
9049                                                 &mddev->recovery))
9050                                                 mddev->recovery_cp =
9051                                                         mddev->curr_resync_completed;
9052                                         else
9053                                                 mddev->recovery_cp =
9054                                                         mddev->curr_resync;
9055                                 }
9056                         } else
9057                                 mddev->recovery_cp = MaxSector;
9058                 } else {
9059                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9060                                 mddev->curr_resync = MaxSector;
9061                         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9062                             test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
9063                                 rcu_read_lock();
9064                                 rdev_for_each_rcu(rdev, mddev)
9065                                         if (rdev->raid_disk >= 0 &&
9066                                             mddev->delta_disks >= 0 &&
9067                                             !test_bit(Journal, &rdev->flags) &&
9068                                             !test_bit(Faulty, &rdev->flags) &&
9069                                             !test_bit(In_sync, &rdev->flags) &&
9070                                             rdev->recovery_offset < mddev->curr_resync)
9071                                                 rdev->recovery_offset = mddev->curr_resync;
9072                                 rcu_read_unlock();
9073                         }
9074                 }
9075         }
9076  skip:
9077         /* set CHANGE_PENDING here since maybe another update is needed,
9078          * so other nodes are informed. It should be harmless for normal
9079          * raid */
9080         set_mask_bits(&mddev->sb_flags, 0,
9081                       BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
9082
9083         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9084                         !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9085                         mddev->delta_disks > 0 &&
9086                         mddev->pers->finish_reshape &&
9087                         mddev->pers->size &&
9088                         mddev->queue) {
9089                 mddev_lock_nointr(mddev);
9090                 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
9091                 mddev_unlock(mddev);
9092                 if (!mddev_is_clustered(mddev)) {
9093                         set_capacity(mddev->gendisk, mddev->array_sectors);
9094                         revalidate_disk_size(mddev->gendisk, true);
9095                 }
9096         }
9097
9098         spin_lock(&mddev->lock);
9099         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9100                 /* We completed so min/max setting can be forgotten if used. */
9101                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9102                         mddev->resync_min = 0;
9103                 mddev->resync_max = MaxSector;
9104         } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9105                 mddev->resync_min = mddev->curr_resync_completed;
9106         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
9107         mddev->curr_resync = 0;
9108         spin_unlock(&mddev->lock);
9109
9110         wake_up(&resync_wait);
9111         md_wakeup_thread(mddev->thread);
9112         return;
9113 }
9114 EXPORT_SYMBOL_GPL(md_do_sync);
9115
9116 static int remove_and_add_spares(struct mddev *mddev,
9117                                  struct md_rdev *this)
9118 {
9119         struct md_rdev *rdev;
9120         int spares = 0;
9121         int removed = 0;
9122         bool remove_some = false;
9123
9124         if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
9125                 /* Mustn't remove devices when resync thread is running */
9126                 return 0;
9127
9128         rdev_for_each(rdev, mddev) {
9129                 if ((this == NULL || rdev == this) &&
9130                     rdev->raid_disk >= 0 &&
9131                     !test_bit(Blocked, &rdev->flags) &&
9132                     test_bit(Faulty, &rdev->flags) &&
9133                     atomic_read(&rdev->nr_pending)==0) {
9134                         /* Faulty non-Blocked devices with nr_pending == 0
9135                          * never get nr_pending incremented,
9136                          * never get Faulty cleared, and never get Blocked set.
9137                          * So we can synchronize_rcu now rather than once per device
9138                          */
9139                         remove_some = true;
9140                         set_bit(RemoveSynchronized, &rdev->flags);
9141                 }
9142         }
9143
9144         if (remove_some)
9145                 synchronize_rcu();
9146         rdev_for_each(rdev, mddev) {
9147                 if ((this == NULL || rdev == this) &&
9148                     rdev->raid_disk >= 0 &&
9149                     !test_bit(Blocked, &rdev->flags) &&
9150                     ((test_bit(RemoveSynchronized, &rdev->flags) ||
9151                      (!test_bit(In_sync, &rdev->flags) &&
9152                       !test_bit(Journal, &rdev->flags))) &&
9153                     atomic_read(&rdev->nr_pending)==0)) {
9154                         if (mddev->pers->hot_remove_disk(
9155                                     mddev, rdev) == 0) {
9156                                 sysfs_unlink_rdev(mddev, rdev);
9157                                 rdev->saved_raid_disk = rdev->raid_disk;
9158                                 rdev->raid_disk = -1;
9159                                 removed++;
9160                         }
9161                 }
9162                 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
9163                         clear_bit(RemoveSynchronized, &rdev->flags);
9164         }
9165
9166         if (removed && mddev->kobj.sd)
9167                 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9168
9169         if (this && removed)
9170                 goto no_add;
9171
9172         rdev_for_each(rdev, mddev) {
9173                 if (this && this != rdev)
9174                         continue;
9175                 if (test_bit(Candidate, &rdev->flags))
9176                         continue;
9177                 if (rdev->raid_disk >= 0 &&
9178                     !test_bit(In_sync, &rdev->flags) &&
9179                     !test_bit(Journal, &rdev->flags) &&
9180                     !test_bit(Faulty, &rdev->flags))
9181                         spares++;
9182                 if (rdev->raid_disk >= 0)
9183                         continue;
9184                 if (test_bit(Faulty, &rdev->flags))
9185                         continue;
9186                 if (!test_bit(Journal, &rdev->flags)) {
9187                         if (mddev->ro &&
9188                             ! (rdev->saved_raid_disk >= 0 &&
9189                                !test_bit(Bitmap_sync, &rdev->flags)))
9190                                 continue;
9191
9192                         rdev->recovery_offset = 0;
9193                 }
9194                 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
9195                         /* failure here is OK */
9196                         sysfs_link_rdev(mddev, rdev);
9197                         if (!test_bit(Journal, &rdev->flags))
9198                                 spares++;
9199                         md_new_event(mddev);
9200                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9201                 }
9202         }
9203 no_add:
9204         if (removed)
9205                 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9206         return spares;
9207 }
9208
9209 static void md_start_sync(struct work_struct *ws)
9210 {
9211         struct mddev *mddev = container_of(ws, struct mddev, del_work);
9212
9213         mddev->sync_thread = md_register_thread(md_do_sync,
9214                                                 mddev,
9215                                                 "resync");
9216         if (!mddev->sync_thread) {
9217                 pr_warn("%s: could not start resync thread...\n",
9218                         mdname(mddev));
9219                 /* leave the spares where they are, it shouldn't hurt */
9220                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9221                 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9222                 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9223                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9224                 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9225                 wake_up(&resync_wait);
9226                 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9227                                        &mddev->recovery))
9228                         if (mddev->sysfs_action)
9229                                 sysfs_notify_dirent_safe(mddev->sysfs_action);
9230         } else
9231                 md_wakeup_thread(mddev->sync_thread);
9232         sysfs_notify_dirent_safe(mddev->sysfs_action);
9233         md_new_event(mddev);
9234 }
9235
9236 /*
9237  * This routine is regularly called by all per-raid-array threads to
9238  * deal with generic issues like resync and super-block update.
9239  * Raid personalities that don't have a thread (linear/raid0) do not
9240  * need this as they never do any recovery or update the superblock.
9241  *
9242  * It does not do any resync itself, but rather "forks" off other threads
9243  * to do that as needed.
9244  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
9245  * "->recovery" and create a thread at ->sync_thread.
9246  * When the thread finishes it sets MD_RECOVERY_DONE
9247  * and wakes up this thread which will reap the thread and finish up.
9248  * This thread also removes any faulty devices (with nr_pending == 0).
9249  *
9250  * The overall approach is:
9251  *  1/ if the superblock needs updating, update it.
9252  *  2/ If a recovery thread is running, don't do anything else.
9253  *  3/ If recovery has finished, clean up, possibly marking spares active.
9254  *  4/ If there are any faulty devices, remove them.
9255  *  5/ If array is degraded, try to add spare devices
9256  *  6/ If array has spares or is not in-sync, start a resync thread.
9257  */
9258 void md_check_recovery(struct mddev *mddev)
9259 {
9260         if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9261                 /* Write superblock - thread that called mddev_suspend()
9262                  * holds reconfig_mutex for us.
9263                  */
9264                 set_bit(MD_UPDATING_SB, &mddev->flags);
9265                 smp_mb__after_atomic();
9266                 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9267                         md_update_sb(mddev, 0);
9268                 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9269                 wake_up(&mddev->sb_wait);
9270         }
9271
9272         if (mddev->suspended)
9273                 return;
9274
9275         if (mddev->bitmap)
9276                 md_bitmap_daemon_work(mddev);
9277
9278         if (signal_pending(current)) {
9279                 if (mddev->pers->sync_request && !mddev->external) {
9280                         pr_debug("md: %s in immediate safe mode\n",
9281                                  mdname(mddev));
9282                         mddev->safemode = 2;
9283                 }
9284                 flush_signals(current);
9285         }
9286
9287         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9288                 return;
9289         if ( ! (
9290                 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
9291                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9292                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
9293                 (mddev->external == 0 && mddev->safemode == 1) ||
9294                 (mddev->safemode == 2
9295                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
9296                 ))
9297                 return;
9298
9299         if (mddev_trylock(mddev)) {
9300                 int spares = 0;
9301                 bool try_set_sync = mddev->safemode != 0;
9302
9303                 if (!mddev->external && mddev->safemode == 1)
9304                         mddev->safemode = 0;
9305
9306                 if (mddev->ro) {
9307                         struct md_rdev *rdev;
9308                         if (!mddev->external && mddev->in_sync)
9309                                 /* 'Blocked' flag not needed as failed devices
9310                                  * will be recorded if array switched to read/write.
9311                                  * Leaving it set will prevent the device
9312                                  * from being removed.
9313                                  */
9314                                 rdev_for_each(rdev, mddev)
9315                                         clear_bit(Blocked, &rdev->flags);
9316                         /* On a read-only array we can:
9317                          * - remove failed devices
9318                          * - add already-in_sync devices if the array itself
9319                          *   is in-sync.
9320                          * As we only add devices that are already in-sync,
9321                          * we can activate the spares immediately.
9322                          */
9323                         remove_and_add_spares(mddev, NULL);
9324                         /* There is no thread, but we need to call
9325                          * ->spare_active and clear saved_raid_disk
9326                          */
9327                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9328                         md_reap_sync_thread(mddev);
9329                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9330                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9331                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
9332                         goto unlock;
9333                 }
9334
9335                 if (mddev_is_clustered(mddev)) {
9336                         struct md_rdev *rdev, *tmp;
9337                         /* kick the device if another node issued a
9338                          * disk removal.
9339                          */
9340                         rdev_for_each_safe(rdev, tmp, mddev) {
9341                                 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
9342                                                 rdev->raid_disk < 0)
9343                                         md_kick_rdev_from_array(rdev);
9344                         }
9345                 }
9346
9347                 if (try_set_sync && !mddev->external && !mddev->in_sync) {
9348                         spin_lock(&mddev->lock);
9349                         set_in_sync(mddev);
9350                         spin_unlock(&mddev->lock);
9351                 }
9352
9353                 if (mddev->sb_flags)
9354                         md_update_sb(mddev, 0);
9355
9356                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
9357                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
9358                         /* resync/recovery still happening */
9359                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9360                         goto unlock;
9361                 }
9362                 if (mddev->sync_thread) {
9363                         md_reap_sync_thread(mddev);
9364                         goto unlock;
9365                 }
9366                 /* Set RUNNING before clearing NEEDED to avoid
9367                  * any transients in the value of "sync_action".
9368                  */
9369                 mddev->curr_resync_completed = 0;
9370                 spin_lock(&mddev->lock);
9371                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9372                 spin_unlock(&mddev->lock);
9373                 /* Clear some bits that don't mean anything, but
9374                  * might be left set
9375                  */
9376                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
9377                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
9378
9379                 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9380                     test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
9381                         goto not_running;
9382                 /* no recovery is running.
9383                  * remove any failed drives, then
9384                  * add spares if possible.
9385                  * Spares are also removed and re-added, to allow
9386                  * the personality to fail the re-add.
9387                  */
9388
9389                 if (mddev->reshape_position != MaxSector) {
9390                         if (mddev->pers->check_reshape == NULL ||
9391                             mddev->pers->check_reshape(mddev) != 0)
9392                                 /* Cannot proceed */
9393                                 goto not_running;
9394                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9395                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9396                 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
9397                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9398                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9399                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9400                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9401                 } else if (mddev->recovery_cp < MaxSector) {
9402                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9403                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9404                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
9405                         /* nothing to be done ... */
9406                         goto not_running;
9407
9408                 if (mddev->pers->sync_request) {
9409                         if (spares) {
9410                                 /* We are adding a device or devices to an array
9411                                  * which has the bitmap stored on all devices.
9412                                  * So make sure all bitmap pages get written
9413                                  */
9414                                 md_bitmap_write_all(mddev->bitmap);
9415                         }
9416                         INIT_WORK(&mddev->del_work, md_start_sync);
9417                         queue_work(md_misc_wq, &mddev->del_work);
9418                         goto unlock;
9419                 }
9420         not_running:
9421                 if (!mddev->sync_thread) {
9422                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9423                         wake_up(&resync_wait);
9424                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9425                                                &mddev->recovery))
9426                                 if (mddev->sysfs_action)
9427                                         sysfs_notify_dirent_safe(mddev->sysfs_action);
9428                 }
9429         unlock:
9430                 wake_up(&mddev->sb_wait);
9431                 mddev_unlock(mddev);
9432         }
9433 }
9434 EXPORT_SYMBOL(md_check_recovery);
9435
9436 void md_reap_sync_thread(struct mddev *mddev)
9437 {
9438         struct md_rdev *rdev;
9439         sector_t old_dev_sectors = mddev->dev_sectors;
9440         bool is_reshaped = false;
9441
9442         /* resync has finished, collect result */
9443         md_unregister_thread(&mddev->sync_thread);
9444         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9445             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
9446             mddev->degraded != mddev->raid_disks) {
9447                 /* success...*/
9448                 /* activate any spares */
9449                 if (mddev->pers->spare_active(mddev)) {
9450                         sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9451                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9452                 }
9453         }
9454         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9455             mddev->pers->finish_reshape) {
9456                 mddev->pers->finish_reshape(mddev);
9457                 if (mddev_is_clustered(mddev))
9458                         is_reshaped = true;
9459         }
9460
9461         /* If array is no-longer degraded, then any saved_raid_disk
9462          * information must be scrapped.
9463          */
9464         if (!mddev->degraded)
9465                 rdev_for_each(rdev, mddev)
9466                         rdev->saved_raid_disk = -1;
9467
9468         md_update_sb(mddev, 1);
9469         /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
9470          * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
9471          * clustered raid */
9472         if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
9473                 md_cluster_ops->resync_finish(mddev);
9474         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9475         clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
9476         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9477         clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9478         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9479         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9480         /*
9481          * We call md_cluster_ops->update_size here because sync_size could
9482          * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
9483          * so it is time to update size across cluster.
9484          */
9485         if (mddev_is_clustered(mddev) && is_reshaped
9486                                       && !test_bit(MD_CLOSING, &mddev->flags))
9487                 md_cluster_ops->update_size(mddev, old_dev_sectors);
9488         wake_up(&resync_wait);
9489         /* flag recovery needed just to double check */
9490         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9491         sysfs_notify_dirent_safe(mddev->sysfs_completed);
9492         sysfs_notify_dirent_safe(mddev->sysfs_action);
9493         md_new_event(mddev);
9494         if (mddev->event_work.func)
9495                 queue_work(md_misc_wq, &mddev->event_work);
9496 }
9497 EXPORT_SYMBOL(md_reap_sync_thread);
9498
9499 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
9500 {
9501         sysfs_notify_dirent_safe(rdev->sysfs_state);
9502         wait_event_timeout(rdev->blocked_wait,
9503                            !test_bit(Blocked, &rdev->flags) &&
9504                            !test_bit(BlockedBadBlocks, &rdev->flags),
9505                            msecs_to_jiffies(5000));
9506         rdev_dec_pending(rdev, mddev);
9507 }
9508 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
9509
9510 void md_finish_reshape(struct mddev *mddev)
9511 {
9512         /* called by the personality module when a reshape completes. */
9513         struct md_rdev *rdev;
9514
9515         rdev_for_each(rdev, mddev) {
9516                 if (rdev->data_offset > rdev->new_data_offset)
9517                         rdev->sectors += rdev->data_offset - rdev->new_data_offset;
9518                 else
9519                         rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
9520                 rdev->data_offset = rdev->new_data_offset;
9521         }
9522 }
9523 EXPORT_SYMBOL(md_finish_reshape);
9524
9525 /* Bad block management */
9526
9527 /* Returns 1 on success, 0 on failure */
9528 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9529                        int is_new)
9530 {
9531         struct mddev *mddev = rdev->mddev;
9532         int rv;
9533         if (is_new)
9534                 s += rdev->new_data_offset;
9535         else
9536                 s += rdev->data_offset;
9537         rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
9538         if (rv == 0) {
9539                 /* Make sure they get written out promptly */
9540                 if (test_bit(ExternalBbl, &rdev->flags))
9541                         sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
9542                 sysfs_notify_dirent_safe(rdev->sysfs_state);
9543                 set_mask_bits(&mddev->sb_flags, 0,
9544                               BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
9545                 md_wakeup_thread(rdev->mddev->thread);
9546                 return 1;
9547         } else
9548                 return 0;
9549 }
9550 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
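/*
 * Sketch of how a personality is expected to consume rdev_set_badblocks()
 * when a write to a member device fails: try to record the range first,
 * and only fail the whole device if the bad-block log cannot take it
 * (return value 0):
 *
 *	if (!rdev_set_badblocks(rdev, sector, nr_sectors, 0))
 *		md_error(rdev->mddev, rdev);
 */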
9551
9552 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9553                          int is_new)
9554 {
9555         int rv;
9556         if (is_new)
9557                 s += rdev->new_data_offset;
9558         else
9559                 s += rdev->data_offset;
9560         rv = badblocks_clear(&rdev->badblocks, s, sectors);
9561         if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
9562                 sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
9563         return rv;
9564 }
9565 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
9566
9567 static int md_notify_reboot(struct notifier_block *this,
9568                             unsigned long code, void *x)
9569 {
9570         struct list_head *tmp;
9571         struct mddev *mddev;
9572         int need_delay = 0;
9573
9574         for_each_mddev(mddev, tmp) {
9575                 if (mddev_trylock(mddev)) {
9576                         if (mddev->pers)
9577                                 __md_stop_writes(mddev);
9578                         if (mddev->persistent)
9579                                 mddev->safemode = 2;
9580                         mddev_unlock(mddev);
9581                 }
9582                 need_delay = 1;
9583         }
9584         /*
9585          * certain more exotic SCSI devices are known to be
9586          * volatile wrt too early system reboots. While the
9587          * right place to handle this issue is the given
9588          * driver, we do want to have a safe RAID driver ...
9589          */
9590         if (need_delay)
9591                 mdelay(1000*1);
9592
9593         return NOTIFY_DONE;
9594 }
9595
9596 static struct notifier_block md_notifier = {
9597         .notifier_call  = md_notify_reboot,
9598         .next           = NULL,
9599         .priority       = INT_MAX, /* before any real devices */
9600 };
9601
9602 static void md_geninit(void)
9603 {
9604         pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
9605
9606         proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
9607 }
9608
9609 static int __init md_init(void)
9610 {
9611         int ret = -ENOMEM;
9612
9613         md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
9614         if (!md_wq)
9615                 goto err_wq;
9616
9617         md_misc_wq = alloc_workqueue("md_misc", 0, 0);
9618         if (!md_misc_wq)
9619                 goto err_misc_wq;
9620
9621         md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
9622         if (!md_rdev_misc_wq)
9623                 goto err_rdev_misc_wq;
9624
9625         if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
9626                 goto err_md;
9627
9628         if ((ret = register_blkdev(0, "mdp")) < 0)
9629                 goto err_mdp;
9630         mdp_major = ret;
9631
9632         blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
9633                             md_probe, NULL, NULL);
9634         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
9635                             md_probe, NULL, NULL);
9636
9637         register_reboot_notifier(&md_notifier);
9638         raid_table_header = register_sysctl_table(raid_root_table);
9639
9640         md_geninit();
9641         return 0;
9642
9643 err_mdp:
9644         unregister_blkdev(MD_MAJOR, "md");
9645 err_md:
9646         destroy_workqueue(md_rdev_misc_wq);
9647 err_rdev_misc_wq:
9648         destroy_workqueue(md_misc_wq);
9649 err_misc_wq:
9650         destroy_workqueue(md_wq);
9651 err_wq:
9652         return ret;
9653 }
9654
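/*
 * check_sb_changes - apply superblock changes made by another cluster node.
 *
 * After a remote node updates the metadata, pick up a size change, role
 * changes (newly activated spares or failed devices), a change in
 * raid_disks, and any reshape that was started or finished remotely, then
 * bring mddev->events up to date.
 */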
9655 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
9656 {
9657         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
9658         struct md_rdev *rdev2, *tmp;
9659         int role, ret;
9660         char b[BDEVNAME_SIZE];
9661
9662         /*
9663          * If the size was changed on another node then we need to
9664          * resize here as well.
9665          */
9666         if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
9667                 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
9668                 if (ret)
9669                         pr_info("md-cluster: resize failed\n");
9670                 else
9671                         md_bitmap_update_sb(mddev->bitmap);
9672         }
9673
9674         /* Check for change of roles in the active devices */
9675         rdev_for_each_safe(rdev2, tmp, mddev) {
9676                 if (test_bit(Faulty, &rdev2->flags))
9677                         continue;
9678
9679                 /* Check if the roles changed */
9680                 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
9681
9682                 if (test_bit(Candidate, &rdev2->flags)) {
9683                         if (role == 0xfffe) {
9684                                 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
9685                                 md_kick_rdev_from_array(rdev2);
9686                                 continue;
9687                         }
9688                         else
9689                                 clear_bit(Candidate, &rdev2->flags);
9690                 }
9691
9692                 if (role != rdev2->raid_disk) {
9693                         /*
9694                          * The device was activated remotely, unless a reshape is in progress.
9695                          */
9696                         if (rdev2->raid_disk == -1 && role != 0xffff &&
9697                             !(le32_to_cpu(sb->feature_map) &
9698                               MD_FEATURE_RESHAPE_ACTIVE)) {
9699                                 rdev2->saved_raid_disk = role;
9700                                 ret = remove_and_add_spares(mddev, rdev2);
9701                                 pr_info("Activated spare: %s\n",
9702                                         bdevname(rdev2->bdev,b));
9703                                 /* Wake up mddev->thread here so the array can
9704                                  * resync onto the newly activated disk. */
9705                                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9706                                 md_wakeup_thread(mddev->thread);
9707                         }
9708                         /* Device is faulty.
9709                          * We just want to do the minimum needed to mark the
9710                          * disk as faulty; the recovery is performed by the
9711                          * node that initiated the error.
9712                          */
9713                         if ((role == 0xfffe) || (role == 0xfffd)) {
9714                                 md_error(mddev, rdev2);
9715                                 clear_bit(Blocked, &rdev2->flags);
9716                         }
9717                 }
9718         }
9719
9720         if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
9721                 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
9722                 if (ret)
9723                         pr_warn("md: updating array disks failed. %d\n", ret);
9724         }
9725
9726         /*
9727          * mddev->delta_disks has already been updated in update_raid_disks(),
9728          * so now is the time to check for a reshape.
9729          */
9730         if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9731             (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9732                 /*
9733                  * A reshape is happening on the remote node; we need to
9734                  * update reshape_position and call start_reshape().
9735                  */
9736                 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
9737                 if (mddev->pers->update_reshape_pos)
9738                         mddev->pers->update_reshape_pos(mddev);
9739                 if (mddev->pers->start_reshape)
9740                         mddev->pers->start_reshape(mddev);
9741         } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9742                    mddev->reshape_position != MaxSector &&
9743                    !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9744                 /* The reshape has just finished on another node. */
9745                 mddev->reshape_position = MaxSector;
9746                 if (mddev->pers->update_reshape_pos)
9747                         mddev->pers->update_reshape_pos(mddev);
9748         }
9749
9750         /* Finally bring the event count up to date */
9751         mddev->events = le64_to_cpu(sb->events);
9752 }
9753
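/*
 * Re-read the superblock of one rdev from disk.  The old sb page is kept
 * in 'swapout' and restored if the reload fails, so the rdev is never left
 * without valid metadata.
 */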
9754 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
9755 {
9756         int err;
9757         struct page *swapout = rdev->sb_page;
9758         struct mdp_superblock_1 *sb;
9759
9760         /* Stash the rdev's current sb page in 'swapout' so it can be
9761          * restored if reloading the superblock below fails.
9762          */
9763         rdev->sb_page = NULL;
9764         err = alloc_disk_sb(rdev);
9765         if (err == 0) {
9766                 ClearPageUptodate(rdev->sb_page);
9767                 rdev->sb_loaded = 0;
9768                 err = super_types[mddev->major_version].
9769                         load_super(rdev, NULL, mddev->minor_version);
9770         }
9771         if (err < 0) {
9772                 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
9773                                 __func__, __LINE__, rdev->desc_nr, err);
9774                 if (rdev->sb_page)
9775                         put_page(rdev->sb_page);
9776                 rdev->sb_page = swapout;
9777                 rdev->sb_loaded = 1;
9778                 return err;
9779         }
9780
9781         sb = page_address(rdev->sb_page);
9782         /* Only pick up the recovery offset if MD_FEATURE_RECOVERY_OFFSET
9783          * is set in the feature map.
9784          */
9785
9786         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
9787                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
9788
9789         /* The other node finished recovery; call spare_active() to mark
9790          * the device In_sync and update mddev->degraded.
9791          */
9792         if (rdev->recovery_offset == MaxSector &&
9793             !test_bit(In_sync, &rdev->flags) &&
9794             mddev->pers->spare_active(mddev))
9795                 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9796
9797         put_page(swapout);
9798         return 0;
9799 }
9800
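/*
 * md_reload_sb - reload metadata after another cluster node changed it.
 * @nr: desc_nr of the rdev whose superblock carries the update.
 *
 * Called from the md-cluster code when a metadata update is received:
 * re-read that rdev's superblock, apply the changes via check_sb_changes(),
 * then re-read every other rdev so recovery_offset is current everywhere.
 */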
9801 void md_reload_sb(struct mddev *mddev, int nr)
9802 {
9803         struct md_rdev *rdev = NULL, *iter;
9804         int err;
9805
9806         /* Find the rdev */
9807         rdev_for_each_rcu(iter, mddev) {
9808                 if (iter->desc_nr == nr) {
9809                         rdev = iter;
9810                         break;
9811                 }
9812         }
9813
9814         if (!rdev) {
9815                 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
9816                 return;
9817         }
9818
9819         err = read_rdev(mddev, rdev);
9820         if (err < 0)
9821                 return;
9822
9823         check_sb_changes(mddev, rdev);
9824
9825         /* Read all rdevs to update recovery_offset */
9826         rdev_for_each_rcu(rdev, mddev) {
9827                 if (!test_bit(Faulty, &rdev->flags))
9828                         read_rdev(mddev, rdev);
9829         }
9830 }
9831 EXPORT_SYMBOL(md_reload_sb);
9832
9833 #ifndef MODULE
9834
9835 /*
9836  * Searches all registered partitions for autorun RAID arrays
9837  * at boot time.
9838  */
9839
9840 static DEFINE_MUTEX(detected_devices_mutex);
9841 static LIST_HEAD(all_detected_devices);
9842 struct detected_devices_node {
9843         struct list_head list;
9844         dev_t dev;
9845 };
9846
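/*
 * Called from the partition-scanning code for partitions flagged for RAID
 * autodetect; just remember the device here, the actual assembly is done
 * later by md_autostart_arrays().
 */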
9847 void md_autodetect_dev(dev_t dev)
9848 {
9849         struct detected_devices_node *node_detected_dev;
9850
9851         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
9852         if (node_detected_dev) {
9853                 node_detected_dev->dev = dev;
9854                 mutex_lock(&detected_devices_mutex);
9855                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
9856                 mutex_unlock(&detected_devices_mutex);
9857         }
9858 }
9859
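/*
 * Drain the list built up by md_autodetect_dev(): import each device
 * (super_format 0, i.e. the old 0.90 metadata, is assumed for in-kernel
 * autodetection) and hand the survivors to autorun_devices() for assembly.
 */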
9860 void md_autostart_arrays(int part)
9861 {
9862         struct md_rdev *rdev;
9863         struct detected_devices_node *node_detected_dev;
9864         dev_t dev;
9865         int i_scanned, i_passed;
9866
9867         i_scanned = 0;
9868         i_passed = 0;
9869
9870         pr_info("md: Autodetecting RAID arrays.\n");
9871
9872         mutex_lock(&detected_devices_mutex);
9873         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
9874                 i_scanned++;
9875                 node_detected_dev = list_entry(all_detected_devices.next,
9876                                         struct detected_devices_node, list);
9877                 list_del(&node_detected_dev->list);
9878                 dev = node_detected_dev->dev;
9879                 kfree(node_detected_dev);
9880                 mutex_unlock(&detected_devices_mutex);
9881                 rdev = md_import_device(dev, 0, 90);
9882                 mutex_lock(&detected_devices_mutex);
9883                 if (IS_ERR(rdev))
9884                         continue;
9885
9886                 if (test_bit(Faulty, &rdev->flags))
9887                         continue;
9888
9889                 set_bit(AutoDetected, &rdev->flags);
9890                 list_add(&rdev->same_set, &pending_raid_disks);
9891                 i_passed++;
9892         }
9893         mutex_unlock(&detected_devices_mutex);
9894
9895         pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
9896
9897         autorun_devices(part);
9898 }
9899
9900 #endif /* !MODULE */
9901
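/*
 * Module exit: undo everything md_init() registered, wake any readers
 * polling /proc/mdstat so they can drop their references, tear down all
 * remaining arrays and finally destroy the workqueues.
 */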
9902 static __exit void md_exit(void)
9903 {
9904         struct mddev *mddev;
9905         struct list_head *tmp;
9906         int delay = 1;
9907
9908         blk_unregister_region(MKDEV(MD_MAJOR, 0), 512);
9909         blk_unregister_region(MKDEV(mdp_major, 0), 1U << MINORBITS);
9910
9911         unregister_blkdev(MD_MAJOR, "md");
9912         unregister_blkdev(mdp_major, "mdp");
9913         unregister_reboot_notifier(&md_notifier);
9914         unregister_sysctl_table(raid_table_header);
9915
9916         /* We cannot unload the module while some process is waiting for
9917          * us in select() or poll(); keep waking the waiters until none remain.
9918          */
9919         md_unloading = 1;
9920         while (waitqueue_active(&md_event_waiters)) {
9921                 /* not safe to leave yet */
9922                 wake_up(&md_event_waiters);
9923                 msleep(delay);
9924                 delay += delay;
9925         }
9926         remove_proc_entry("mdstat", NULL);
9927
9928         for_each_mddev(mddev, tmp) {
9929                 export_array(mddev);
9930                 mddev->ctime = 0;
9931                 mddev->hold_active = 0;
9932                 /*
9933                  * for_each_mddev() will call mddev_put() at the end of each
9934                  * iteration.  As the mddev is now fully clear, this will
9935                  * schedule the mddev for destruction by a workqueue, and the
9936                  * destroy_workqueue() below will wait for that to complete.
9937                  */
9938         }
9939         destroy_workqueue(md_rdev_misc_wq);
9940         destroy_workqueue(md_misc_wq);
9941         destroy_workqueue(md_wq);
9942 }
9943
9944 subsys_initcall(md_init);
9945 module_exit(md_exit);
9946
9947 static int get_ro(char *buffer, const struct kernel_param *kp)
9948 {
9949         return sprintf(buffer, "%d\n", start_readonly);
9950 }
9951 static int set_ro(const char *val, const struct kernel_param *kp)
9952 {
9953         return kstrtouint(val, 10, (unsigned int *)&start_readonly);
9954 }
9955
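/*
 * Module parameters.  When md is built as a module these can be given at
 * load time, e.g. "modprobe md-mod start_ro=1"; when built in, the same
 * names should be available on the kernel command line with the module
 * prefix, e.g. "md_mod.start_dirty_degraded=1" (prefix assumed from the
 * md-mod module name).
 */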
9956 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
9957 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
9958 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
9959 module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
9960
9961 MODULE_LICENSE("GPL");
9962 MODULE_DESCRIPTION("MD RAID framework");
9963 MODULE_ALIAS("md");
9964 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);