GNU Linux-libre 5.10.153-gnu1
[releases.git] / drivers / md / md.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3    md.c : Multiple Devices driver for Linux
4      Copyright (C) 1998, 1999, 2000 Ingo Molnar
5
6      completely rewritten, based on the MD driver code from Marc Zyngier
7
8    Changes:
9
10    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
11    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
12    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
13    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
14    - kmod support by: Cyrus Durgin
15    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
16    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17
18    - lots of fixes and improvements to the RAID1/RAID5 and generic
19      RAID code (such as request based resynchronization):
20
21      Neil Brown <neilb@cse.unsw.edu.au>.
22
23    - persistent bitmap code
24      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
25
26
27    Errors, Warnings, etc.
28    Please use:
29      pr_crit() for error conditions that risk data loss
30      pr_err() for error conditions that are unexpected, like an IO error
31          or internal inconsistency
32    pr_warn() for error conditions that could have been predicted, like
33          adding a device to an array when it has incompatible metadata
34    pr_info() for interesting, very rare events, like an array starting
35          or stopping, or resync starting or stopping
36      pr_debug() for everything else.
37
38 */
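/*
 * Illustrative example of the convention above: an unexpected superblock
 * write failure would be reported with pr_err():
 *
 *	pr_err("md: %s: superblock write error\n", mdname(mddev));
 */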
39
40 #include <linux/sched/mm.h>
41 #include <linux/sched/signal.h>
42 #include <linux/kthread.h>
43 #include <linux/blkdev.h>
44 #include <linux/badblocks.h>
45 #include <linux/sysctl.h>
46 #include <linux/seq_file.h>
47 #include <linux/fs.h>
48 #include <linux/poll.h>
49 #include <linux/ctype.h>
50 #include <linux/string.h>
51 #include <linux/hdreg.h>
52 #include <linux/proc_fs.h>
53 #include <linux/random.h>
54 #include <linux/module.h>
55 #include <linux/reboot.h>
56 #include <linux/file.h>
57 #include <linux/compat.h>
58 #include <linux/delay.h>
59 #include <linux/raid/md_p.h>
60 #include <linux/raid/md_u.h>
61 #include <linux/raid/detect.h>
62 #include <linux/slab.h>
63 #include <linux/percpu-refcount.h>
64 #include <linux/part_stat.h>
65
66 #include <trace/events/block.h>
67 #include "md.h"
68 #include "md-bitmap.h"
69 #include "md-cluster.h"
70
71 /* pers_list is a list of registered personalities protected
72  * by pers_lock.
73  * pers_lock also protects accesses to
74  * mddev->thread when the reconfig mutex cannot be held.
75  */
76 static LIST_HEAD(pers_list);
77 static DEFINE_SPINLOCK(pers_lock);
78
79 static struct kobj_type md_ktype;
80
81 struct md_cluster_operations *md_cluster_ops;
82 EXPORT_SYMBOL(md_cluster_ops);
83 static struct module *md_cluster_mod;
84
85 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
86 static struct workqueue_struct *md_wq;
87 static struct workqueue_struct *md_misc_wq;
88 static struct workqueue_struct *md_rdev_misc_wq;
89
90 static int remove_and_add_spares(struct mddev *mddev,
91                                  struct md_rdev *this);
92 static void mddev_detach(struct mddev *mddev);
93
94 /*
95  * Default number of read corrections we'll attempt on an rdev
96  * before ejecting it from the array. We divide the read error
97  * count by 2 for every hour elapsed between read errors.
98  */
99 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
100 /* Default safemode delay: 200 msec */
101 #define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
102 /*
103  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
104  * is 1000 KB/sec, so the extra system load does not show up that much.
105  * Increase it if you want to have more _guaranteed_ speed. Note that
106  * the RAID driver will use the maximum available bandwidth if the IO
107  * subsystem is idle. There is also an 'absolute maximum' reconstruction
108  * speed limit - in case reconstruction slows down your system despite
109  * idle IO detection.
110  *
111  * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
112  * or via /sys/block/mdX/md/sync_speed_{min,max}.
113  */
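/*
 * Example: raise the guaranteed resync rate for md0 to roughly 50 MB/sec
 * (the values are in KB/sec):
 *
 *	echo 50000 > /sys/block/md0/md/sync_speed_min
 */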
114
115 static int sysctl_speed_limit_min = 1000;
116 static int sysctl_speed_limit_max = 200000;
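/* A per-array value set via sysfs overrides the system-wide default;
 * 0 means "use the sysctl value above".
 */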
117 static inline int speed_min(struct mddev *mddev)
118 {
119         return mddev->sync_speed_min ?
120                 mddev->sync_speed_min : sysctl_speed_limit_min;
121 }
122
123 static inline int speed_max(struct mddev *mddev)
124 {
125         return mddev->sync_speed_max ?
126                 mddev->sync_speed_max : sysctl_speed_limit_max;
127 }
128
129 static void rdev_uninit_serial(struct md_rdev *rdev)
130 {
131         if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
132                 return;
133
134         kvfree(rdev->serial);
135         rdev->serial = NULL;
136 }
137
138 static void rdevs_uninit_serial(struct mddev *mddev)
139 {
140         struct md_rdev *rdev;
141
142         rdev_for_each(rdev, mddev)
143                 rdev_uninit_serial(rdev);
144 }
145
146 static int rdev_init_serial(struct md_rdev *rdev)
147 {
148         /* serial_nums equals BARRIER_BUCKETS_NR */
149         int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
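	/*
	 * e.g. with 4K pages and a 4-byte atomic_t this is
	 * 1 << (12 - 2) = 1024 buckets.
	 */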
150         struct serial_in_rdev *serial = NULL;
151
152         if (test_bit(CollisionCheck, &rdev->flags))
153                 return 0;
154
155         serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
156                           GFP_KERNEL);
157         if (!serial)
158                 return -ENOMEM;
159
160         for (i = 0; i < serial_nums; i++) {
161                 struct serial_in_rdev *serial_tmp = &serial[i];
162
163                 spin_lock_init(&serial_tmp->serial_lock);
164                 serial_tmp->serial_rb = RB_ROOT_CACHED;
165                 init_waitqueue_head(&serial_tmp->serial_io_wait);
166         }
167
168         rdev->serial = serial;
169         set_bit(CollisionCheck, &rdev->flags);
170
171         return 0;
172 }
173
174 static int rdevs_init_serial(struct mddev *mddev)
175 {
176         struct md_rdev *rdev;
177         int ret = 0;
178
179         rdev_for_each(rdev, mddev) {
180                 ret = rdev_init_serial(rdev);
181                 if (ret)
182                         break;
183         }
184
185         /* Free all resources if the pool does not exist */
186         if (ret && !mddev->serial_info_pool)
187                 rdevs_uninit_serial(mddev);
188
189         return ret;
190 }
191
192 /*
193  * An rdev needs serialization enabled if it meets these conditions:
194  * 1. it is a multi-queue device flagged with writemostly.
195  * 2. the write-behind mode is enabled.
196  */
197 static int rdev_need_serial(struct md_rdev *rdev)
198 {
199         return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
200                 rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
201                 test_bit(WriteMostly, &rdev->flags));
202 }
203
204 /*
205  * Init resources for rdev(s), then create serial_info_pool if:
206  * 1. rdev is the first device which returns true from rdev_need_serial.
207  * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
208  */
209 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
210                               bool is_suspend)
211 {
212         int ret = 0;
213
214         if (rdev && !rdev_need_serial(rdev) &&
215             !test_bit(CollisionCheck, &rdev->flags))
216                 return;
217
218         if (!is_suspend)
219                 mddev_suspend(mddev);
220
221         if (!rdev)
222                 ret = rdevs_init_serial(mddev);
223         else
224                 ret = rdev_init_serial(rdev);
225         if (ret)
226                 goto abort;
227
228         if (mddev->serial_info_pool == NULL) {
229                 /*
230                  * already in memalloc noio context by
231                  * mddev_suspend()
232                  */
233                 mddev->serial_info_pool =
234                         mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
235                                                 sizeof(struct serial_info));
236                 if (!mddev->serial_info_pool) {
237                         rdevs_uninit_serial(mddev);
238                         pr_err("can't alloc memory pool for serialization\n");
239                 }
240         }
241
242 abort:
243         if (!is_suspend)
244                 mddev_resume(mddev);
245 }
246
247 /*
248  * Free resources from rdev(s), and destroy serial_info_pool under these conditions:
249  * 1. rdev is the last device flagged with CollisionCheck.
250  * 2. the bitmap is destroyed while the policy is not enabled.
251  * 3. when disabling the policy, the pool is destroyed only when no rdev needs it.
252  */
253 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
254                                bool is_suspend)
255 {
256         if (rdev && !test_bit(CollisionCheck, &rdev->flags))
257                 return;
258
259         if (mddev->serial_info_pool) {
260                 struct md_rdev *temp;
261                 int num = 0; /* used to track if other rdevs need the pool */
262
263                 if (!is_suspend)
264                         mddev_suspend(mddev);
265                 rdev_for_each(temp, mddev) {
266                         if (!rdev) {
267                                 if (!mddev->serialize_policy ||
268                                     !rdev_need_serial(temp))
269                                         rdev_uninit_serial(temp);
270                                 else
271                                         num++;
272                         } else if (temp != rdev &&
273                                    test_bit(CollisionCheck, &temp->flags))
274                                 num++;
275                 }
276
277                 if (rdev)
278                         rdev_uninit_serial(rdev);
279
280                 if (num)
281                         pr_info("The mempool could be used by other devices\n");
282                 else {
283                         mempool_destroy(mddev->serial_info_pool);
284                         mddev->serial_info_pool = NULL;
285                 }
286                 if (!is_suspend)
287                         mddev_resume(mddev);
288         }
289 }
290
291 static struct ctl_table_header *raid_table_header;
292
293 static struct ctl_table raid_table[] = {
294         {
295                 .procname       = "speed_limit_min",
296                 .data           = &sysctl_speed_limit_min,
297                 .maxlen         = sizeof(int),
298                 .mode           = S_IRUGO|S_IWUSR,
299                 .proc_handler   = proc_dointvec,
300         },
301         {
302                 .procname       = "speed_limit_max",
303                 .data           = &sysctl_speed_limit_max,
304                 .maxlen         = sizeof(int),
305                 .mode           = S_IRUGO|S_IWUSR,
306                 .proc_handler   = proc_dointvec,
307         },
308         { }
309 };
310
311 static struct ctl_table raid_dir_table[] = {
312         {
313                 .procname       = "raid",
314                 .maxlen         = 0,
315                 .mode           = S_IRUGO|S_IXUGO,
316                 .child          = raid_table,
317         },
318         { }
319 };
320
321 static struct ctl_table raid_root_table[] = {
322         {
323                 .procname       = "dev",
324                 .maxlen         = 0,
325                 .mode           = 0555,
326                 .child          = raid_dir_table,
327         },
328         {  }
329 };
330
331 static int start_readonly;
332
333 /*
334  * The original mechanism for creating an md device is to create
335  * a device node in /dev and to open it.  This causes races with device-close.
336  * The preferred method is to write to the "new_array" module parameter.
337  * This can avoid races.
338  * Setting create_on_open to false disables the original mechanism
339  * so all the races disappear.
340  */
341 static bool create_on_open = true;
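/*
 * Illustrative use of the preferred mechanism (assuming md is built as
 * md_mod), which pre-creates the named array before it is opened:
 *
 *	echo md_test > /sys/module/md_mod/parameters/new_array
 */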
342
343 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
344                             struct mddev *mddev)
345 {
346         if (!mddev || !bioset_initialized(&mddev->bio_set))
347                 return bio_alloc(gfp_mask, nr_iovecs);
348
349         return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
350 }
351 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
352
353 static struct bio *md_bio_alloc_sync(struct mddev *mddev)
354 {
355         if (!mddev || !bioset_initialized(&mddev->sync_set))
356                 return bio_alloc(GFP_NOIO, 1);
357
358         return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
359 }
360
361 /*
362  * We have a system wide 'event count' that is incremented
363  * on any 'interesting' event, and readers of /proc/mdstat
364  * can use 'poll' or 'select' to find out when the event
365  * count increases.
366  *
367  * Events are:
368  *  start array, stop array, error, add device, remove device,
369  *  start build, activate spare
370  */
371 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
372 static atomic_t md_event_count;
373 void md_new_event(struct mddev *mddev)
374 {
375         atomic_inc(&md_event_count);
376         wake_up(&md_event_waiters);
377 }
378 EXPORT_SYMBOL_GPL(md_new_event);
379
380 /*
381  * Enables iteration over all existing md arrays.
382  * all_mddevs_lock protects this list.
383  */
384 static LIST_HEAD(all_mddevs);
385 static DEFINE_SPINLOCK(all_mddevs_lock);
386
387 /*
388  * iterates through all used mddevs in the system.
389  * We take care to grab the all_mddevs_lock whenever navigating
390  * the list, and to always hold a refcount when unlocked.
391  * Any code which breaks out of this loop still owns
392  * a reference to the current mddev and must mddev_put() it.
393  */
394 #define for_each_mddev(_mddev,_tmp)                                     \
395                                                                         \
396         for (({ spin_lock(&all_mddevs_lock);                            \
397                 _tmp = all_mddevs.next;                                 \
398                 _mddev = NULL;});                                       \
399              ({ if (_tmp != &all_mddevs)                                \
400                         mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
401                 spin_unlock(&all_mddevs_lock);                          \
402                 if (_mddev) mddev_put(_mddev);                          \
403                 _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
404                 _tmp != &all_mddevs;});                                 \
405              ({ spin_lock(&all_mddevs_lock);                            \
406                 _tmp = _tmp->next;})                                    \
407                 )
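/*
 * Illustrative use:
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		md_wakeup_thread(mddev->thread);
 */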
408
409 /* Rather than calling directly into the personality make_request function,
410  * IO requests come here first so that we can check if the device is
411  * being suspended pending a reconfiguration.
412  * We hold a refcount over the call to ->make_request.  By the time that
413  * call has finished, the bio has been linked into some internal structure
414  * and so is visible to ->quiesce(), so we don't need the refcount any more.
415  */
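/*
 * A bio must wait if the whole array is suspended, or if it is a write that
 * overlaps the suspend_lo..suspend_hi window; reads are only held off by a
 * full suspend.
 */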
416 static bool is_suspended(struct mddev *mddev, struct bio *bio)
417 {
418         if (mddev->suspended)
419                 return true;
420         if (bio_data_dir(bio) != WRITE)
421                 return false;
422         if (mddev->suspend_lo >= mddev->suspend_hi)
423                 return false;
424         if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
425                 return false;
426         if (bio_end_sector(bio) < mddev->suspend_lo)
427                 return false;
428         return true;
429 }
430
431 void md_handle_request(struct mddev *mddev, struct bio *bio)
432 {
433 check_suspended:
434         rcu_read_lock();
435         if (is_suspended(mddev, bio)) {
436                 DEFINE_WAIT(__wait);
437                 for (;;) {
438                         prepare_to_wait(&mddev->sb_wait, &__wait,
439                                         TASK_UNINTERRUPTIBLE);
440                         if (!is_suspended(mddev, bio))
441                                 break;
442                         rcu_read_unlock();
443                         schedule();
444                         rcu_read_lock();
445                 }
446                 finish_wait(&mddev->sb_wait, &__wait);
447         }
448         atomic_inc(&mddev->active_io);
449         rcu_read_unlock();
450
451         if (!mddev->pers->make_request(mddev, bio)) {
452                 atomic_dec(&mddev->active_io);
453                 wake_up(&mddev->sb_wait);
454                 goto check_suspended;
455         }
456
457         if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
458                 wake_up(&mddev->sb_wait);
459 }
460 EXPORT_SYMBOL(md_handle_request);
461
462 static blk_qc_t md_submit_bio(struct bio *bio)
463 {
464         const int rw = bio_data_dir(bio);
465         const int sgrp = op_stat_group(bio_op(bio));
466         struct mddev *mddev = bio->bi_disk->private_data;
467         unsigned int sectors;
468
469         if (mddev == NULL || mddev->pers == NULL) {
470                 bio_io_error(bio);
471                 return BLK_QC_T_NONE;
472         }
473
474         if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
475                 bio_io_error(bio);
476                 return BLK_QC_T_NONE;
477         }
478
479         blk_queue_split(&bio);
480
481         if (mddev->ro == 1 && unlikely(rw == WRITE)) {
482                 if (bio_sectors(bio) != 0)
483                         bio->bi_status = BLK_STS_IOERR;
484                 bio_endio(bio);
485                 return BLK_QC_T_NONE;
486         }
487
488         /*
489          * save the sectors now since our bio can
490          * go away inside make_request
491          */
492         sectors = bio_sectors(bio);
493         /* the bio may become mergeable after being passed to the underlying layer */
494         bio->bi_opf &= ~REQ_NOMERGE;
495
496         md_handle_request(mddev, bio);
497
498         part_stat_lock();
499         part_stat_inc(&mddev->gendisk->part0, ios[sgrp]);
500         part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors);
501         part_stat_unlock();
502
503         return BLK_QC_T_NONE;
504 }
505
506 /* mddev_suspend makes sure no new requests are submitted
507  * to the device, and that any requests that have been submitted
508  * are completely handled.
509  * Once mddev_detach() is called and completes, the module will be
510  * completely unused.
511  */
512 void mddev_suspend(struct mddev *mddev)
513 {
514         WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
515         lockdep_assert_held(&mddev->reconfig_mutex);
516         if (mddev->suspended++)
517                 return;
518         synchronize_rcu();
519         wake_up(&mddev->sb_wait);
520         set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
521         smp_mb__after_atomic();
522         wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
523         mddev->pers->quiesce(mddev, 1);
524         clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
525         wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
526
527         del_timer_sync(&mddev->safemode_timer);
528         /* restrict memory-reclaim I/O while the raid array is suspended */
529         mddev->noio_flag = memalloc_noio_save();
530 }
531 EXPORT_SYMBOL_GPL(mddev_suspend);
532
533 void mddev_resume(struct mddev *mddev)
534 {
535         /* leave the memalloc_noio scope entered in mddev_suspend() */
536         memalloc_noio_restore(mddev->noio_flag);
537         lockdep_assert_held(&mddev->reconfig_mutex);
538         if (--mddev->suspended)
539                 return;
540         wake_up(&mddev->sb_wait);
541         mddev->pers->quiesce(mddev, 0);
542
543         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
544         md_wakeup_thread(mddev->thread);
545         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
546 }
547 EXPORT_SYMBOL_GPL(mddev_resume);
548
549 /*
550  * Generic flush handling for md
551  */
552
553 static void md_end_flush(struct bio *bio)
554 {
555         struct md_rdev *rdev = bio->bi_private;
556         struct mddev *mddev = rdev->mddev;
557
558         rdev_dec_pending(rdev, mddev);
559
560         if (atomic_dec_and_test(&mddev->flush_pending)) {
561                 /* The pre-request flush has finished */
562                 queue_work(md_wq, &mddev->flush_work);
563         }
564         bio_put(bio);
565 }
566
567 static void md_submit_flush_data(struct work_struct *ws);
568
569 static void submit_flushes(struct work_struct *ws)
570 {
571         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
572         struct md_rdev *rdev;
573
574         mddev->start_flush = ktime_get_boottime();
575         INIT_WORK(&mddev->flush_work, md_submit_flush_data);
576         atomic_set(&mddev->flush_pending, 1);
577         rcu_read_lock();
578         rdev_for_each_rcu(rdev, mddev)
579                 if (rdev->raid_disk >= 0 &&
580                     !test_bit(Faulty, &rdev->flags)) {
581                         /* Take two references: one is dropped
582                          * when the request finishes, one after
583                          * we re-take rcu_read_lock
584                          */
585                         struct bio *bi;
586                         atomic_inc(&rdev->nr_pending);
587                         atomic_inc(&rdev->nr_pending);
588                         rcu_read_unlock();
589                         bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
590                         bi->bi_end_io = md_end_flush;
591                         bi->bi_private = rdev;
592                         bio_set_dev(bi, rdev->bdev);
593                         bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
594                         atomic_inc(&mddev->flush_pending);
595                         submit_bio(bi);
596                         rcu_read_lock();
597                         rdev_dec_pending(rdev, mddev);
598                 }
599         rcu_read_unlock();
600         if (atomic_dec_and_test(&mddev->flush_pending))
601                 queue_work(md_wq, &mddev->flush_work);
602 }
603
604 static void md_submit_flush_data(struct work_struct *ws)
605 {
606         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
607         struct bio *bio = mddev->flush_bio;
608
609         /*
610          * flush_bio must be reset before calling into md_handle_request to avoid a
611          * deadlock: other bios that have passed the md_handle_request suspend check
612          * could wait for this flush, while the md_handle_request call below could
613          * wait for those bios because of the same suspend check.
614          */
615         spin_lock_irq(&mddev->lock);
616         mddev->last_flush = mddev->start_flush;
617         mddev->flush_bio = NULL;
618         spin_unlock_irq(&mddev->lock);
619         wake_up(&mddev->sb_wait);
620
621         if (bio->bi_iter.bi_size == 0) {
622                 /* an empty barrier - all done */
623                 bio_endio(bio);
624         } else {
625                 bio->bi_opf &= ~REQ_PREFLUSH;
626                 md_handle_request(mddev, bio);
627         }
628 }
629
630 /*
631  * Manages consolidation of flushes and submitting any flushes needed for
632  * a bio with REQ_PREFLUSH.  Returns true if the bio is finished or is
633  * being finished in another context.  Returns false if the flushing is
634  * complete but still needs the I/O portion of the bio to be processed.
635  */
636 bool md_flush_request(struct mddev *mddev, struct bio *bio)
637 {
638         ktime_t start = ktime_get_boottime();
639         spin_lock_irq(&mddev->lock);
640         wait_event_lock_irq(mddev->sb_wait,
641                             !mddev->flush_bio ||
642                             ktime_after(mddev->last_flush, start),
643                             mddev->lock);
644         if (!ktime_after(mddev->last_flush, start)) {
645                 WARN_ON(mddev->flush_bio);
646                 mddev->flush_bio = bio;
647                 bio = NULL;
648         }
649         spin_unlock_irq(&mddev->lock);
650
651         if (!bio) {
652                 INIT_WORK(&mddev->flush_work, submit_flushes);
653                 queue_work(md_wq, &mddev->flush_work);
654         } else {
655                 /* flush was performed for some other bio while we waited. */
656                 if (bio->bi_iter.bi_size == 0)
657                         /* an empty barrier - all done */
658                         bio_endio(bio);
659                 else {
660                         bio->bi_opf &= ~REQ_PREFLUSH;
661                         return false;
662                 }
663         }
664         return true;
665 }
666 EXPORT_SYMBOL(md_flush_request);
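/*
 * Illustrative caller pattern, as used at the top of the RAID personalities'
 * make_request methods:
 *
 *	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
 *	    && md_flush_request(mddev, bio))
 *		return true;
 */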
667
668 static inline struct mddev *mddev_get(struct mddev *mddev)
669 {
670         atomic_inc(&mddev->active);
671         return mddev;
672 }
673
674 static void mddev_delayed_delete(struct work_struct *ws);
675
676 static void mddev_put(struct mddev *mddev)
677 {
678         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
679                 return;
680         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
681             mddev->ctime == 0 && !mddev->hold_active) {
682                 /* Array is not configured at all, and not held active,
683                  * so destroy it */
684                 list_del_init(&mddev->all_mddevs);
685
686                 /*
687                  * Call queue_work inside the spinlock so that
688                  * flush_workqueue() after mddev_find will succeed in waiting
689                  * for the work to be done.
690                  */
691                 INIT_WORK(&mddev->del_work, mddev_delayed_delete);
692                 queue_work(md_misc_wq, &mddev->del_work);
693         }
694         spin_unlock(&all_mddevs_lock);
695 }
696
697 static void md_safemode_timeout(struct timer_list *t);
698
699 void mddev_init(struct mddev *mddev)
700 {
701         kobject_init(&mddev->kobj, &md_ktype);
702         mutex_init(&mddev->open_mutex);
703         mutex_init(&mddev->reconfig_mutex);
704         mutex_init(&mddev->bitmap_info.mutex);
705         INIT_LIST_HEAD(&mddev->disks);
706         INIT_LIST_HEAD(&mddev->all_mddevs);
707         timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
708         atomic_set(&mddev->active, 1);
709         atomic_set(&mddev->openers, 0);
710         atomic_set(&mddev->active_io, 0);
711         spin_lock_init(&mddev->lock);
712         atomic_set(&mddev->flush_pending, 0);
713         init_waitqueue_head(&mddev->sb_wait);
714         init_waitqueue_head(&mddev->recovery_wait);
715         mddev->reshape_position = MaxSector;
716         mddev->reshape_backwards = 0;
717         mddev->last_sync_action = "none";
718         mddev->resync_min = 0;
719         mddev->resync_max = MaxSector;
720         mddev->level = LEVEL_NONE;
721 }
722 EXPORT_SYMBOL_GPL(mddev_init);
723
724 static struct mddev *mddev_find_locked(dev_t unit)
725 {
726         struct mddev *mddev;
727
728         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
729                 if (mddev->unit == unit)
730                         return mddev;
731
732         return NULL;
733 }
734
735 static struct mddev *mddev_find(dev_t unit)
736 {
737         struct mddev *mddev;
738
739         if (MAJOR(unit) != MD_MAJOR)
740                 unit &= ~((1 << MdpMinorShift) - 1);
741
742         spin_lock(&all_mddevs_lock);
743         mddev = mddev_find_locked(unit);
744         if (mddev)
745                 mddev_get(mddev);
746         spin_unlock(&all_mddevs_lock);
747
748         return mddev;
749 }
750
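/*
 * Like mddev_find(), but allocate, initialise and register a new mddev if no
 * array with that unit exists yet.  A unit of 0 means "pick an unused md
 * minor"; such arrays are held active UNTIL_STOP.
 */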
751 static struct mddev *mddev_find_or_alloc(dev_t unit)
752 {
753         struct mddev *mddev, *new = NULL;
754
755         if (unit && MAJOR(unit) != MD_MAJOR)
756                 unit &= ~((1<<MdpMinorShift)-1);
757
758  retry:
759         spin_lock(&all_mddevs_lock);
760
761         if (unit) {
762                 mddev = mddev_find_locked(unit);
763                 if (mddev) {
764                         mddev_get(mddev);
765                         spin_unlock(&all_mddevs_lock);
766                         kfree(new);
767                         return mddev;
768                 }
769
770                 if (new) {
771                         list_add(&new->all_mddevs, &all_mddevs);
772                         spin_unlock(&all_mddevs_lock);
773                         new->hold_active = UNTIL_IOCTL;
774                         return new;
775                 }
776         } else if (new) {
777                 /* find an unused unit number */
778                 static int next_minor = 512;
779                 int start = next_minor;
780                 int is_free = 0;
781                 int dev = 0;
782                 while (!is_free) {
783                         dev = MKDEV(MD_MAJOR, next_minor);
784                         next_minor++;
785                         if (next_minor > MINORMASK)
786                                 next_minor = 0;
787                         if (next_minor == start) {
788                                 /* Oh dear, all in use. */
789                                 spin_unlock(&all_mddevs_lock);
790                                 kfree(new);
791                                 return NULL;
792                         }
793
794                         is_free = !mddev_find_locked(dev);
795                 }
796                 new->unit = dev;
797                 new->md_minor = MINOR(dev);
798                 new->hold_active = UNTIL_STOP;
799                 list_add(&new->all_mddevs, &all_mddevs);
800                 spin_unlock(&all_mddevs_lock);
801                 return new;
802         }
803         spin_unlock(&all_mddevs_lock);
804
805         new = kzalloc(sizeof(*new), GFP_KERNEL);
806         if (!new)
807                 return NULL;
808
809         new->unit = unit;
810         if (MAJOR(unit) == MD_MAJOR)
811                 new->md_minor = MINOR(unit);
812         else
813                 new->md_minor = MINOR(unit) >> MdpMinorShift;
814
815         mddev_init(new);
816
817         goto retry;
818 }
819
820 static struct attribute_group md_redundancy_group;
821
822 void mddev_unlock(struct mddev *mddev)
823 {
824         if (mddev->to_remove) {
825                 /* These cannot be removed under reconfig_mutex as
826                  * an access to the files will try to take reconfig_mutex
827                  * while holding the file unremovable, which leads to
828                  * a deadlock.
829                  * So set sysfs_active while the removal is happening,
830                  * and anything else which might set ->to_remove or may
831                  * otherwise change the sysfs namespace will fail with
832                  * -EBUSY if sysfs_active is still set.
833                  * We set sysfs_active under reconfig_mutex and elsewhere
834                  * test it under the same mutex to ensure its correct value
835                  * is seen.
836                  */
837                 struct attribute_group *to_remove = mddev->to_remove;
838                 mddev->to_remove = NULL;
839                 mddev->sysfs_active = 1;
840                 mutex_unlock(&mddev->reconfig_mutex);
841
842                 if (mddev->kobj.sd) {
843                         if (to_remove != &md_redundancy_group)
844                                 sysfs_remove_group(&mddev->kobj, to_remove);
845                         if (mddev->pers == NULL ||
846                             mddev->pers->sync_request == NULL) {
847                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
848                                 if (mddev->sysfs_action)
849                                         sysfs_put(mddev->sysfs_action);
850                                 if (mddev->sysfs_completed)
851                                         sysfs_put(mddev->sysfs_completed);
852                                 if (mddev->sysfs_degraded)
853                                         sysfs_put(mddev->sysfs_degraded);
854                                 mddev->sysfs_action = NULL;
855                                 mddev->sysfs_completed = NULL;
856                                 mddev->sysfs_degraded = NULL;
857                         }
858                 }
859                 mddev->sysfs_active = 0;
860         } else
861                 mutex_unlock(&mddev->reconfig_mutex);
862
863         /* As we've dropped the mutex we need a spinlock to
864          * make sure the thread doesn't disappear
865          */
866         spin_lock(&pers_lock);
867         md_wakeup_thread(mddev->thread);
868         wake_up(&mddev->sb_wait);
869         spin_unlock(&pers_lock);
870 }
871 EXPORT_SYMBOL_GPL(mddev_unlock);
872
873 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
874 {
875         struct md_rdev *rdev;
876
877         rdev_for_each_rcu(rdev, mddev)
878                 if (rdev->desc_nr == nr)
879                         return rdev;
880
881         return NULL;
882 }
883 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
884
885 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
886 {
887         struct md_rdev *rdev;
888
889         rdev_for_each(rdev, mddev)
890                 if (rdev->bdev->bd_dev == dev)
891                         return rdev;
892
893         return NULL;
894 }
895
896 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
897 {
898         struct md_rdev *rdev;
899
900         rdev_for_each_rcu(rdev, mddev)
901                 if (rdev->bdev->bd_dev == dev)
902                         return rdev;
903
904         return NULL;
905 }
906 EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
907
908 static struct md_personality *find_pers(int level, char *clevel)
909 {
910         struct md_personality *pers;
911         list_for_each_entry(pers, &pers_list, list) {
912                 if (level != LEVEL_NONE && pers->level == level)
913                         return pers;
914                 if (strcmp(pers->name, clevel)==0)
915                         return pers;
916         }
917         return NULL;
918 }
919
920 /* return the offset of the super block in 512-byte sectors */
921 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
922 {
923         sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
924         return MD_NEW_SIZE_SECTORS(num_sectors);
925 }
926
927 static int alloc_disk_sb(struct md_rdev *rdev)
928 {
929         rdev->sb_page = alloc_page(GFP_KERNEL);
930         if (!rdev->sb_page)
931                 return -ENOMEM;
932         return 0;
933 }
934
935 void md_rdev_clear(struct md_rdev *rdev)
936 {
937         if (rdev->sb_page) {
938                 put_page(rdev->sb_page);
939                 rdev->sb_loaded = 0;
940                 rdev->sb_page = NULL;
941                 rdev->sb_start = 0;
942                 rdev->sectors = 0;
943         }
944         if (rdev->bb_page) {
945                 put_page(rdev->bb_page);
946                 rdev->bb_page = NULL;
947         }
948         badblocks_exit(&rdev->badblocks);
949 }
950 EXPORT_SYMBOL_GPL(md_rdev_clear);
951
952 static void super_written(struct bio *bio)
953 {
954         struct md_rdev *rdev = bio->bi_private;
955         struct mddev *mddev = rdev->mddev;
956
957         if (bio->bi_status) {
958                 pr_err("md: %s gets error=%d\n", __func__,
959                        blk_status_to_errno(bio->bi_status));
960                 md_error(mddev, rdev);
961                 if (!test_bit(Faulty, &rdev->flags)
962                     && (bio->bi_opf & MD_FAILFAST)) {
963                         set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
964                         set_bit(LastDev, &rdev->flags);
965                 }
966         } else
967                 clear_bit(LastDev, &rdev->flags);
968
969         if (atomic_dec_and_test(&mddev->pending_writes))
970                 wake_up(&mddev->sb_wait);
971         rdev_dec_pending(rdev, mddev);
972         bio_put(bio);
973 }
974
975 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
976                    sector_t sector, int size, struct page *page)
977 {
978         /* write first size bytes of page to sector of rdev
979          * Increment mddev->pending_writes before returning
980          * and decrement it on completion, waking up sb_wait
981          * if zero is reached.
982          * If an error occurred, call md_error
983          */
984         struct bio *bio;
985         int ff = 0;
986
987         if (!page)
988                 return;
989
990         if (test_bit(Faulty, &rdev->flags))
991                 return;
992
993         bio = md_bio_alloc_sync(mddev);
994
995         atomic_inc(&rdev->nr_pending);
996
997         bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
998         bio->bi_iter.bi_sector = sector;
999         bio_add_page(bio, page, size, 0);
1000         bio->bi_private = rdev;
1001         bio->bi_end_io = super_written;
1002
1003         if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
1004             test_bit(FailFast, &rdev->flags) &&
1005             !test_bit(LastDev, &rdev->flags))
1006                 ff = MD_FAILFAST;
1007         bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
1008
1009         atomic_inc(&mddev->pending_writes);
1010         submit_bio(bio);
1011 }
1012
1013 int md_super_wait(struct mddev *mddev)
1014 {
1015         /* wait for all superblock writes that were scheduled to complete */
1016         wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
1017         if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
1018                 return -EAGAIN;
1019         return 0;
1020 }
1021
1022 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
1023                  struct page *page, int op, int op_flags, bool metadata_op)
1024 {
1025         struct bio *bio = md_bio_alloc_sync(rdev->mddev);
1026         int ret;
1027
1028         if (metadata_op && rdev->meta_bdev)
1029                 bio_set_dev(bio, rdev->meta_bdev);
1030         else
1031                 bio_set_dev(bio, rdev->bdev);
1032         bio_set_op_attrs(bio, op, op_flags);
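	/*
	 * Metadata I/O is relative to the superblock.  For data I/O, sectors
	 * that have already been reshaped use the new data offset; the rest
	 * still use the old one.
	 */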
1033         if (metadata_op)
1034                 bio->bi_iter.bi_sector = sector + rdev->sb_start;
1035         else if (rdev->mddev->reshape_position != MaxSector &&
1036                  (rdev->mddev->reshape_backwards ==
1037                   (sector >= rdev->mddev->reshape_position)))
1038                 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
1039         else
1040                 bio->bi_iter.bi_sector = sector + rdev->data_offset;
1041         bio_add_page(bio, page, size, 0);
1042
1043         submit_bio_wait(bio);
1044
1045         ret = !bio->bi_status;
1046         bio_put(bio);
1047         return ret;
1048 }
1049 EXPORT_SYMBOL_GPL(sync_page_io);
1050
1051 static int read_disk_sb(struct md_rdev *rdev, int size)
1052 {
1053         char b[BDEVNAME_SIZE];
1054
1055         if (rdev->sb_loaded)
1056                 return 0;
1057
1058         if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
1059                 goto fail;
1060         rdev->sb_loaded = 1;
1061         return 0;
1062
1063 fail:
1064         pr_err("md: disabled device %s, could not read superblock.\n",
1065                bdevname(rdev->bdev,b));
1066         return -EINVAL;
1067 }
1068
1069 static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
1070 {
1071         return  sb1->set_uuid0 == sb2->set_uuid0 &&
1072                 sb1->set_uuid1 == sb2->set_uuid1 &&
1073                 sb1->set_uuid2 == sb2->set_uuid2 &&
1074                 sb1->set_uuid3 == sb2->set_uuid3;
1075 }
1076
1077 static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
1078 {
1079         int ret;
1080         mdp_super_t *tmp1, *tmp2;
1081
1082         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
1083         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
1084
1085         if (!tmp1 || !tmp2) {
1086                 ret = 0;
1087                 goto abort;
1088         }
1089
1090         *tmp1 = *sb1;
1091         *tmp2 = *sb2;
1092
1093         /*
1094          * nr_disks is not constant
1095          */
1096         tmp1->nr_disks = 0;
1097         tmp2->nr_disks = 0;
1098
1099         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
1100 abort:
1101         kfree(tmp1);
1102         kfree(tmp2);
1103         return ret;
1104 }
1105
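/*
 * Fold a 32-bit sum to 16 bits, adding the halves twice so that any carry
 * out of the first addition is folded back in.
 */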
1106 static u32 md_csum_fold(u32 csum)
1107 {
1108         csum = (csum & 0xffff) + (csum >> 16);
1109         return (csum & 0xffff) + (csum >> 16);
1110 }
1111
1112 static unsigned int calc_sb_csum(mdp_super_t *sb)
1113 {
1114         u64 newcsum = 0;
1115         u32 *sb32 = (u32*)sb;
1116         int i;
1117         unsigned int disk_csum, csum;
1118
1119         disk_csum = sb->sb_csum;
1120         sb->sb_csum = 0;
1121
1122         for (i = 0; i < MD_SB_BYTES/4 ; i++)
1123                 newcsum += sb32[i];
1124         csum = (newcsum & 0xffffffff) + (newcsum>>32);
1125
1126 #ifdef CONFIG_ALPHA
1127         /* This used to use csum_partial, which was wrong for several
1128          * reasons including that different results are returned on
1129          * different architectures.  It isn't critical that we get exactly
1130          * the same return value as before (we always csum_fold before
1131          * testing, and that removes any differences).  However as we
1132          * know that csum_partial always returned a 16bit value on
1133          * alphas, do a fold to maximise conformity to previous behaviour.
1134          */
1135         sb->sb_csum = md_csum_fold(disk_csum);
1136 #else
1137         sb->sb_csum = disk_csum;
1138 #endif
1139         return csum;
1140 }
1141
1142 /*
1143  * Handle superblock details.
1144  * We want to be able to handle multiple superblock formats
1145  * so we have a common interface to them all, and an array of
1146  * different handlers.
1147  * We rely on user-space to write the initial superblock, and support
1148  * reading and updating of superblocks.
1149  * Interface methods are:
1150  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
1151  *      loads and validates a superblock on dev.
1152  *      if refdev != NULL, compare superblocks on both devices
1153  *    Return:
1154  *      0 - dev has a superblock that is compatible with refdev
1155  *      1 - dev has a superblock that is compatible and newer than refdev
1156  *          so dev should be used as the refdev in future
1157  *     -EINVAL superblock incompatible or invalid
1158  *     -othererror e.g. -EIO
1159  *
1160  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
1161  *      Verify that dev is acceptable into mddev.
1162  *       The first time, mddev->raid_disks will be 0, and data from
1163  *       dev should be merged in.  Subsequent calls check that dev
1164  *       is new enough.  Return 0 or -EINVAL
1165  *
1166  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
1167  *     Update the superblock for rdev with data in mddev
1168  *     This does not write to disc.
1169  *
1170  */
1171
1172 struct super_type  {
1173         char                *name;
1174         struct module       *owner;
1175         int                 (*load_super)(struct md_rdev *rdev,
1176                                           struct md_rdev *refdev,
1177                                           int minor_version);
1178         int                 (*validate_super)(struct mddev *mddev,
1179                                               struct md_rdev *rdev);
1180         void                (*sync_super)(struct mddev *mddev,
1181                                           struct md_rdev *rdev);
1182         unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
1183                                                 sector_t num_sectors);
1184         int                 (*allow_new_offset)(struct md_rdev *rdev,
1185                                                 unsigned long long new_offset);
1186 };
1187
1188 /*
1189  * Check that the given mddev has no bitmap.
1190  *
1191  * This function is called from the run method of all personalities that do not
1192  * support bitmaps. It prints an error message and returns non-zero if mddev
1193  * has a bitmap. Otherwise, it returns 0.
1194  *
1195  */
1196 int md_check_no_bitmap(struct mddev *mddev)
1197 {
1198         if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
1199                 return 0;
1200         pr_warn("%s: bitmaps are not supported for %s\n",
1201                 mdname(mddev), mddev->pers->name);
1202         return 1;
1203 }
1204 EXPORT_SYMBOL(md_check_no_bitmap);
1205
1206 /*
1207  * load_super for 0.90.0
1208  */
1209 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1210 {
1211         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1212         mdp_super_t *sb;
1213         int ret;
1214         bool spare_disk = true;
1215
1216         /*
1217          * Calculate the position of the superblock (in 512-byte sectors);
1218          * it's at the end of the disk.
1219          *
1220          * It also happens to be a multiple of 4Kb.
1221          */
1222         rdev->sb_start = calc_dev_sboffset(rdev);
1223
1224         ret = read_disk_sb(rdev, MD_SB_BYTES);
1225         if (ret)
1226                 return ret;
1227
1228         ret = -EINVAL;
1229
1230         bdevname(rdev->bdev, b);
1231         sb = page_address(rdev->sb_page);
1232
1233         if (sb->md_magic != MD_SB_MAGIC) {
1234                 pr_warn("md: invalid raid superblock magic on %s\n", b);
1235                 goto abort;
1236         }
1237
1238         if (sb->major_version != 0 ||
1239             sb->minor_version < 90 ||
1240             sb->minor_version > 91) {
1241                 pr_warn("Bad version number %d.%d on %s\n",
1242                         sb->major_version, sb->minor_version, b);
1243                 goto abort;
1244         }
1245
1246         if (sb->raid_disks <= 0)
1247                 goto abort;
1248
1249         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
1250                 pr_warn("md: invalid superblock checksum on %s\n", b);
1251                 goto abort;
1252         }
1253
1254         rdev->preferred_minor = sb->md_minor;
1255         rdev->data_offset = 0;
1256         rdev->new_data_offset = 0;
1257         rdev->sb_size = MD_SB_BYTES;
1258         rdev->badblocks.shift = -1;
1259
1260         if (sb->level == LEVEL_MULTIPATH)
1261                 rdev->desc_nr = -1;
1262         else
1263                 rdev->desc_nr = sb->this_disk.number;
1264
1265         /* not spare disk, or LEVEL_MULTIPATH */
1266         if (sb->level == LEVEL_MULTIPATH ||
1267                 (rdev->desc_nr >= 0 &&
1268                  rdev->desc_nr < MD_SB_DISKS &&
1269                  sb->disks[rdev->desc_nr].state &
1270                  ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
1271                 spare_disk = false;
1272
1273         if (!refdev) {
1274                 if (!spare_disk)
1275                         ret = 1;
1276                 else
1277                         ret = 0;
1278         } else {
1279                 __u64 ev1, ev2;
1280                 mdp_super_t *refsb = page_address(refdev->sb_page);
1281                 if (!md_uuid_equal(refsb, sb)) {
1282                         pr_warn("md: %s has different UUID to %s\n",
1283                                 b, bdevname(refdev->bdev,b2));
1284                         goto abort;
1285                 }
1286                 if (!md_sb_equal(refsb, sb)) {
1287                         pr_warn("md: %s has same UUID but different superblock to %s\n",
1288                                 b, bdevname(refdev->bdev, b2));
1289                         goto abort;
1290                 }
1291                 ev1 = md_event(sb);
1292                 ev2 = md_event(refsb);
1293
1294                 if (!spare_disk && ev1 > ev2)
1295                         ret = 1;
1296                 else
1297                         ret = 0;
1298         }
1299         rdev->sectors = rdev->sb_start;
1300         /* Limit to 4TB as metadata cannot record more than that.
1301          * (not needed for Linear and RAID0 as metadata doesn't
1302          * record this size)
1303          */
1304         if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
1305                 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1306
1307         if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1308                 /* "this cannot possibly happen" ... */
1309                 ret = -EINVAL;
1310
1311  abort:
1312         return ret;
1313 }
1314
1315 /*
1316  * validate_super for 0.90.0
1317  */
1318 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1319 {
1320         mdp_disk_t *desc;
1321         mdp_super_t *sb = page_address(rdev->sb_page);
1322         __u64 ev1 = md_event(sb);
1323
1324         rdev->raid_disk = -1;
1325         clear_bit(Faulty, &rdev->flags);
1326         clear_bit(In_sync, &rdev->flags);
1327         clear_bit(Bitmap_sync, &rdev->flags);
1328         clear_bit(WriteMostly, &rdev->flags);
1329
1330         if (mddev->raid_disks == 0) {
1331                 mddev->major_version = 0;
1332                 mddev->minor_version = sb->minor_version;
1333                 mddev->patch_version = sb->patch_version;
1334                 mddev->external = 0;
1335                 mddev->chunk_sectors = sb->chunk_size >> 9;
1336                 mddev->ctime = sb->ctime;
1337                 mddev->utime = sb->utime;
1338                 mddev->level = sb->level;
1339                 mddev->clevel[0] = 0;
1340                 mddev->layout = sb->layout;
1341                 mddev->raid_disks = sb->raid_disks;
1342                 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1343                 mddev->events = ev1;
1344                 mddev->bitmap_info.offset = 0;
1345                 mddev->bitmap_info.space = 0;
1346                 /* bitmap can use 60 K after the 4K superblocks */
1347                 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1348                 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1349                 mddev->reshape_backwards = 0;
1350
1351                 if (mddev->minor_version >= 91) {
1352                         mddev->reshape_position = sb->reshape_position;
1353                         mddev->delta_disks = sb->delta_disks;
1354                         mddev->new_level = sb->new_level;
1355                         mddev->new_layout = sb->new_layout;
1356                         mddev->new_chunk_sectors = sb->new_chunk >> 9;
1357                         if (mddev->delta_disks < 0)
1358                                 mddev->reshape_backwards = 1;
1359                 } else {
1360                         mddev->reshape_position = MaxSector;
1361                         mddev->delta_disks = 0;
1362                         mddev->new_level = mddev->level;
1363                         mddev->new_layout = mddev->layout;
1364                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1365                 }
1366                 if (mddev->level == 0)
1367                         mddev->layout = -1;
1368
1369                 if (sb->state & (1<<MD_SB_CLEAN))
1370                         mddev->recovery_cp = MaxSector;
1371                 else {
1372                         if (sb->events_hi == sb->cp_events_hi &&
1373                                 sb->events_lo == sb->cp_events_lo) {
1374                                 mddev->recovery_cp = sb->recovery_cp;
1375                         } else
1376                                 mddev->recovery_cp = 0;
1377                 }
1378
1379                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1380                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1381                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1382                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1383
1384                 mddev->max_disks = MD_SB_DISKS;
1385
1386                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1387                     mddev->bitmap_info.file == NULL) {
1388                         mddev->bitmap_info.offset =
1389                                 mddev->bitmap_info.default_offset;
1390                         mddev->bitmap_info.space =
1391                                 mddev->bitmap_info.default_space;
1392                 }
1393
1394         } else if (mddev->pers == NULL) {
1395                 /* Insist on good event counter while assembling, except
1396                  * for spares (which don't need an event count) */
1397                 ++ev1;
1398                 if (sb->disks[rdev->desc_nr].state & (
1399                             (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1400                         if (ev1 < mddev->events)
1401                                 return -EINVAL;
1402         } else if (mddev->bitmap) {
1403                 /* if adding to array with a bitmap, then we can accept an
1404                  * older device ... but not too old.
1405                  */
1406                 if (ev1 < mddev->bitmap->events_cleared)
1407                         return 0;
1408                 if (ev1 < mddev->events)
1409                         set_bit(Bitmap_sync, &rdev->flags);
1410         } else {
1411                 if (ev1 < mddev->events)
1412                         /* just a hot-add of a new device, leave raid_disk at -1 */
1413                         return 0;
1414         }
1415
1416         if (mddev->level != LEVEL_MULTIPATH) {
1417                 desc = sb->disks + rdev->desc_nr;
1418
1419                 if (desc->state & (1<<MD_DISK_FAULTY))
1420                         set_bit(Faulty, &rdev->flags);
1421                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1422                             desc->raid_disk < mddev->raid_disks */) {
1423                         set_bit(In_sync, &rdev->flags);
1424                         rdev->raid_disk = desc->raid_disk;
1425                         rdev->saved_raid_disk = desc->raid_disk;
1426                 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1427                         /* active but not in sync implies recovery up to
1428                          * reshape position.  We don't know exactly where
1429                          * that is, so set to zero for now */
1430                         if (mddev->minor_version >= 91) {
1431                                 rdev->recovery_offset = 0;
1432                                 rdev->raid_disk = desc->raid_disk;
1433                         }
1434                 }
1435                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1436                         set_bit(WriteMostly, &rdev->flags);
1437                 if (desc->state & (1<<MD_DISK_FAILFAST))
1438                         set_bit(FailFast, &rdev->flags);
1439         } else /* MULTIPATH devices are always in sync */
1440                 set_bit(In_sync, &rdev->flags);
1441         return 0;
1442 }
1443
1444 /*
1445  * sync_super for 0.90.0
1446  */
1447 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1448 {
1449         mdp_super_t *sb;
1450         struct md_rdev *rdev2;
1451         int next_spare = mddev->raid_disks;
1452
1453         /* make rdev->sb match mddev data..
1454          *
1455          * 1/ zero out disks
1456          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1457          * 3/ any empty disks < next_spare become removed
1458          *
1459          * disks[0] gets initialised to REMOVED because
1460          * we cannot be sure from other fields if it has
1461          * been initialised or not.
1462          */
1463         int i;
1464         int active=0, working=0,failed=0,spare=0,nr_disks=0;
1465
1466         rdev->sb_size = MD_SB_BYTES;
1467
1468         sb = page_address(rdev->sb_page);
1469
1470         memset(sb, 0, sizeof(*sb));
1471
1472         sb->md_magic = MD_SB_MAGIC;
1473         sb->major_version = mddev->major_version;
1474         sb->patch_version = mddev->patch_version;
1475         sb->gvalid_words  = 0; /* ignored */
1476         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1477         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1478         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1479         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1480
1481         sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1482         sb->level = mddev->level;
1483         sb->size = mddev->dev_sectors / 2;
1484         sb->raid_disks = mddev->raid_disks;
1485         sb->md_minor = mddev->md_minor;
1486         sb->not_persistent = 0;
1487         sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1488         sb->state = 0;
1489         sb->events_hi = (mddev->events>>32);
1490         sb->events_lo = (u32)mddev->events;
1491
1492         if (mddev->reshape_position == MaxSector)
1493                 sb->minor_version = 90;
1494         else {
1495                 sb->minor_version = 91;
1496                 sb->reshape_position = mddev->reshape_position;
1497                 sb->new_level = mddev->new_level;
1498                 sb->delta_disks = mddev->delta_disks;
1499                 sb->new_layout = mddev->new_layout;
1500                 sb->new_chunk = mddev->new_chunk_sectors << 9;
1501         }
1502         mddev->minor_version = sb->minor_version;
1503         if (mddev->in_sync)
1504         {
1505                 sb->recovery_cp = mddev->recovery_cp;
1506                 sb->cp_events_hi = (mddev->events>>32);
1507                 sb->cp_events_lo = (u32)mddev->events;
1508                 if (mddev->recovery_cp == MaxSector)
1509                         sb->state = (1<< MD_SB_CLEAN);
1510         } else
1511                 sb->recovery_cp = 0;
1512
1513         sb->layout = mddev->layout;
1514         sb->chunk_size = mddev->chunk_sectors << 9;
1515
1516         if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1517                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1518
1519         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1520         rdev_for_each(rdev2, mddev) {
1521                 mdp_disk_t *d;
1522                 int desc_nr;
1523                 int is_active = test_bit(In_sync, &rdev2->flags);
1524
1525                 if (rdev2->raid_disk >= 0 &&
1526                     sb->minor_version >= 91)
1527                         /* we have nowhere to store the recovery_offset,
1528                          * but if it is not below the reshape_position,
1529                          * we can piggy-back on that.
1530                          */
1531                         is_active = 1;
1532                 if (rdev2->raid_disk < 0 ||
1533                     test_bit(Faulty, &rdev2->flags))
1534                         is_active = 0;
1535                 if (is_active)
1536                         desc_nr = rdev2->raid_disk;
1537                 else
1538                         desc_nr = next_spare++;
1539                 rdev2->desc_nr = desc_nr;
1540                 d = &sb->disks[rdev2->desc_nr];
1541                 nr_disks++;
1542                 d->number = rdev2->desc_nr;
1543                 d->major = MAJOR(rdev2->bdev->bd_dev);
1544                 d->minor = MINOR(rdev2->bdev->bd_dev);
1545                 if (is_active)
1546                         d->raid_disk = rdev2->raid_disk;
1547                 else
1548                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1549                 if (test_bit(Faulty, &rdev2->flags))
1550                         d->state = (1<<MD_DISK_FAULTY);
1551                 else if (is_active) {
1552                         d->state = (1<<MD_DISK_ACTIVE);
1553                         if (test_bit(In_sync, &rdev2->flags))
1554                                 d->state |= (1<<MD_DISK_SYNC);
1555                         active++;
1556                         working++;
1557                 } else {
1558                         d->state = 0;
1559                         spare++;
1560                         working++;
1561                 }
1562                 if (test_bit(WriteMostly, &rdev2->flags))
1563                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1564                 if (test_bit(FailFast, &rdev2->flags))
1565                         d->state |= (1<<MD_DISK_FAILFAST);
1566         }
1567         /* now set the "removed" and "faulty" bits on any missing devices */
1568         for (i=0 ; i < mddev->raid_disks ; i++) {
1569                 mdp_disk_t *d = &sb->disks[i];
1570                 if (d->state == 0 && d->number == 0) {
1571                         d->number = i;
1572                         d->raid_disk = i;
1573                         d->state = (1<<MD_DISK_REMOVED);
1574                         d->state |= (1<<MD_DISK_FAULTY);
1575                         failed++;
1576                 }
1577         }
1578         sb->nr_disks = nr_disks;
1579         sb->active_disks = active;
1580         sb->working_disks = working;
1581         sb->failed_disks = failed;
1582         sb->spare_disks = spare;
1583
1584         sb->this_disk = sb->disks[rdev->desc_nr];
1585         sb->sb_csum = calc_sb_csum(sb);
1586 }
1587
1588 /*
1589  * rdev_size_change for 0.90.0
1590  */
1591 static unsigned long long
1592 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1593 {
1594         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1595                 return 0; /* component must fit device */
1596         if (rdev->mddev->bitmap_info.offset)
1597                 return 0; /* can't move bitmap */
1598         rdev->sb_start = calc_dev_sboffset(rdev);
1599         if (!num_sectors || num_sectors > rdev->sb_start)
1600                 num_sectors = rdev->sb_start;
1601         /* Limit to 4TB as metadata cannot record more than that.
1602          * 4TB == 2^32 KB, or 2*2^32 sectors.
1603          */
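        /*
         * Illustrative arithmetic (editor's note, not in the original
         * source): 4TB = 2^32 KiB = 2^33 512-byte sectors = (2ULL << 32),
         * so clamping to (2ULL << 32) - 2 keeps the component size just
         * under the largest value the v0.90 superblock can record.
         */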
1604         if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1605                 num_sectors = (sector_t)(2ULL << 32) - 2;
1606         do {
1607                 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1608                        rdev->sb_page);
1609         } while (md_super_wait(rdev->mddev) < 0);
1610         return num_sectors;
1611 }
1612
1613 static int
1614 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1615 {
1616         /* non-zero offset changes not possible with v0.90 */
1617         return new_offset == 0;
1618 }
1619
1620 /*
1621  * version 1 superblock
1622  */
1623
1624 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1625 {
1626         __le32 disk_csum;
1627         u32 csum;
1628         unsigned long long newcsum;
1629         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1630         __le32 *isuper = (__le32*)sb;
1631
1632         disk_csum = sb->sb_csum;
1633         sb->sb_csum = 0;
1634         newcsum = 0;
1635         for (; size >= 4; size -= 4)
1636                 newcsum += le32_to_cpu(*isuper++);
1637
1638         if (size == 2)
1639                 newcsum += le16_to_cpu(*(__le16*) isuper);
1640
1641         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1642         sb->sb_csum = disk_csum;
1643         return cpu_to_le32(csum);
1644 }
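/*
 * Illustrative note (editor's addition, not in the original source): the
 * checksum above covers 256 bytes of fixed superblock fields plus two bytes
 * per dev_roles slot; e.g. a hypothetical max_dev of 384 gives
 * size = 256 + 384*2 = 1024 bytes, summed as little-endian 32-bit words and
 * folded to 32 bits via (sum & 0xffffffff) + (sum >> 32).
 */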
1645
1646 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1647 {
1648         struct mdp_superblock_1 *sb;
1649         int ret;
1650         sector_t sb_start;
1651         sector_t sectors;
1652         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1653         int bmask;
1654         bool spare_disk = true;
1655
1656         /*
1657          * Calculate the position of the superblock in 512byte sectors.
1658          * It is always aligned to a 4K boundary and
1659          * depending on minor_version, it can be:
1660          * 0: At least 8K, but less than 12K, from end of device
1661          * 1: At start of device
1662          * 2: 4K from start of device.
1663          */
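        /*
         * Worked example (editor's note, not in the original source): with
         * minor_version 0 on a hypothetical 1000007-sector device,
         * sb_start = (1000007 - 16) & ~7 = 999984, i.e. 23 sectors
         * (~11.5K) from the end of the device, inside the 8K-12K window
         * described above and aligned to a 4K (8-sector) boundary.
         */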
1664         switch(minor_version) {
1665         case 0:
1666                 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1667                 sb_start -= 8*2;
1668                 sb_start &= ~(sector_t)(4*2-1);
1669                 break;
1670         case 1:
1671                 sb_start = 0;
1672                 break;
1673         case 2:
1674                 sb_start = 8;
1675                 break;
1676         default:
1677                 return -EINVAL;
1678         }
1679         rdev->sb_start = sb_start;
1680
1681         /* superblock is rarely larger than 1K, but it can be larger,
1682          * and it is safe to read 4k, so we do that
1683          */
1684         ret = read_disk_sb(rdev, 4096);
1685         if (ret) return ret;
1686
1687         sb = page_address(rdev->sb_page);
1688
1689         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1690             sb->major_version != cpu_to_le32(1) ||
1691             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1692             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1693             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1694                 return -EINVAL;
1695
1696         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1697                 pr_warn("md: invalid superblock checksum on %s\n",
1698                         bdevname(rdev->bdev,b));
1699                 return -EINVAL;
1700         }
1701         if (le64_to_cpu(sb->data_size) < 10) {
1702                 pr_warn("md: data_size too small on %s\n",
1703                         bdevname(rdev->bdev,b));
1704                 return -EINVAL;
1705         }
1706         if (sb->pad0 ||
1707             sb->pad3[0] ||
1708             memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1709                 /* Some padding is non-zero, might be a new feature */
1710                 return -EINVAL;
1711
1712         rdev->preferred_minor = 0xffff;
1713         rdev->data_offset = le64_to_cpu(sb->data_offset);
1714         rdev->new_data_offset = rdev->data_offset;
1715         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1716             (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1717                 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1718         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1719
1720         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1721         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1722         if (rdev->sb_size & bmask)
1723                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1724
1725         if (minor_version
1726             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1727                 return -EINVAL;
1728         if (minor_version
1729             && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1730                 return -EINVAL;
1731
1732         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1733                 rdev->desc_nr = -1;
1734         else
1735                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1736
1737         if (!rdev->bb_page) {
1738                 rdev->bb_page = alloc_page(GFP_KERNEL);
1739                 if (!rdev->bb_page)
1740                         return -ENOMEM;
1741         }
1742         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1743             rdev->badblocks.count == 0) {
1744                 /* need to load the bad block list.
1745                  * Currently we limit it to one page.
1746                  */
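                /*
                 * Illustrative note (editor's addition, not in the original
                 * source): each on-disk entry packs (sector << 10) | length
                 * into a little-endian u64, so a hypothetical entry 0x1403
                 * decodes below to sector 5 and length 3, both then scaled
                 * left by bblog_shift; an all-ones entry ends the list.
                 */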
1747                 s32 offset;
1748                 sector_t bb_sector;
1749                 __le64 *bbp;
1750                 int i;
1751                 int sectors = le16_to_cpu(sb->bblog_size);
1752                 if (sectors > (PAGE_SIZE / 512))
1753                         return -EINVAL;
1754                 offset = le32_to_cpu(sb->bblog_offset);
1755                 if (offset == 0)
1756                         return -EINVAL;
1757                 bb_sector = (long long)offset;
1758                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1759                                   rdev->bb_page, REQ_OP_READ, 0, true))
1760                         return -EIO;
1761                 bbp = (__le64 *)page_address(rdev->bb_page);
1762                 rdev->badblocks.shift = sb->bblog_shift;
1763                 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1764                         u64 bb = le64_to_cpu(*bbp);
1765                         int count = bb & (0x3ff);
1766                         u64 sector = bb >> 10;
1767                         sector <<= sb->bblog_shift;
1768                         count <<= sb->bblog_shift;
1769                         if (bb + 1 == 0)
1770                                 break;
1771                         if (badblocks_set(&rdev->badblocks, sector, count, 1))
1772                                 return -EINVAL;
1773                 }
1774         } else if (sb->bblog_offset != 0)
1775                 rdev->badblocks.shift = 0;
1776
1777         if ((le32_to_cpu(sb->feature_map) &
1778             (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
1779                 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1780                 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1781                 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1782         }
1783
1784         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1785             sb->level != 0)
1786                 return -EINVAL;
1787
1788         /* not spare disk, or LEVEL_MULTIPATH */
1789         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
1790                 (rdev->desc_nr >= 0 &&
1791                 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1792                 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1793                  le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1794                 spare_disk = false;
1795
1796         if (!refdev) {
1797                 if (!spare_disk)
1798                         ret = 1;
1799                 else
1800                         ret = 0;
1801         } else {
1802                 __u64 ev1, ev2;
1803                 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1804
1805                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1806                     sb->level != refsb->level ||
1807                     sb->layout != refsb->layout ||
1808                     sb->chunksize != refsb->chunksize) {
1809                         pr_warn("md: %s has strangely different superblock to %s\n",
1810                                 bdevname(rdev->bdev,b),
1811                                 bdevname(refdev->bdev,b2));
1812                         return -EINVAL;
1813                 }
1814                 ev1 = le64_to_cpu(sb->events);
1815                 ev2 = le64_to_cpu(refsb->events);
1816
1817                 if (!spare_disk && ev1 > ev2)
1818                         ret = 1;
1819                 else
1820                         ret = 0;
1821         }
1822         if (minor_version) {
1823                 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1824                 sectors -= rdev->data_offset;
1825         } else
1826                 sectors = rdev->sb_start;
1827         if (sectors < le64_to_cpu(sb->data_size))
1828                 return -EINVAL;
1829         rdev->sectors = le64_to_cpu(sb->data_size);
1830         return ret;
1831 }
1832
1833 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1834 {
1835         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1836         __u64 ev1 = le64_to_cpu(sb->events);
1837
1838         rdev->raid_disk = -1;
1839         clear_bit(Faulty, &rdev->flags);
1840         clear_bit(In_sync, &rdev->flags);
1841         clear_bit(Bitmap_sync, &rdev->flags);
1842         clear_bit(WriteMostly, &rdev->flags);
1843
1844         if (mddev->raid_disks == 0) {
1845                 mddev->major_version = 1;
1846                 mddev->patch_version = 0;
1847                 mddev->external = 0;
1848                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1849                 mddev->ctime = le64_to_cpu(sb->ctime);
1850                 mddev->utime = le64_to_cpu(sb->utime);
1851                 mddev->level = le32_to_cpu(sb->level);
1852                 mddev->clevel[0] = 0;
1853                 mddev->layout = le32_to_cpu(sb->layout);
1854                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1855                 mddev->dev_sectors = le64_to_cpu(sb->size);
1856                 mddev->events = ev1;
1857                 mddev->bitmap_info.offset = 0;
1858                 mddev->bitmap_info.space = 0;
1859                 /* Default location for bitmap is 1K after superblock
1860                  * using 3K - total of 4K
1861                  */
1862                 mddev->bitmap_info.default_offset = 1024 >> 9;
1863                 mddev->bitmap_info.default_space = (4096-1024) >> 9;
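                /*
                 * Editor's note (not in the original source): with these
                 * defaults the bitmap starts 1024 >> 9 = 2 sectors past the
                 * superblock and may use (4096 - 1024) >> 9 = 6 sectors,
                 * so superblock plus bitmap fit in a 4K region starting at
                 * the superblock.
                 */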
1864                 mddev->reshape_backwards = 0;
1865
1866                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1867                 memcpy(mddev->uuid, sb->set_uuid, 16);
1868
1869                 mddev->max_disks =  (4096-256)/2;
1870
1871                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1872                     mddev->bitmap_info.file == NULL) {
1873                         mddev->bitmap_info.offset =
1874                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1875                         /* Metadata doesn't record how much space is available.
1876                          * For 1.0, we assume we can use up to the superblock
1877                          * if before, else to 4K beyond superblock.
1878                          * For others, assume no change is possible.
1879                          */
1880                         if (mddev->minor_version > 0)
1881                                 mddev->bitmap_info.space = 0;
1882                         else if (mddev->bitmap_info.offset > 0)
1883                                 mddev->bitmap_info.space =
1884                                         8 - mddev->bitmap_info.offset;
1885                         else
1886                                 mddev->bitmap_info.space =
1887                                         -mddev->bitmap_info.offset;
1888                 }
1889
1890                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1891                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1892                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1893                         mddev->new_level = le32_to_cpu(sb->new_level);
1894                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1895                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1896                         if (mddev->delta_disks < 0 ||
1897                             (mddev->delta_disks == 0 &&
1898                              (le32_to_cpu(sb->feature_map)
1899                               & MD_FEATURE_RESHAPE_BACKWARDS)))
1900                                 mddev->reshape_backwards = 1;
1901                 } else {
1902                         mddev->reshape_position = MaxSector;
1903                         mddev->delta_disks = 0;
1904                         mddev->new_level = mddev->level;
1905                         mddev->new_layout = mddev->layout;
1906                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1907                 }
1908
1909                 if (mddev->level == 0 &&
1910                     !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
1911                         mddev->layout = -1;
1912
1913                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1914                         set_bit(MD_HAS_JOURNAL, &mddev->flags);
1915
1916                 if (le32_to_cpu(sb->feature_map) &
1917                     (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
1918                         if (le32_to_cpu(sb->feature_map) &
1919                             (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1920                                 return -EINVAL;
1921                         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
1922                             (le32_to_cpu(sb->feature_map) &
1923                                             MD_FEATURE_MULTIPLE_PPLS))
1924                                 return -EINVAL;
1925                         set_bit(MD_HAS_PPL, &mddev->flags);
1926                 }
1927         } else if (mddev->pers == NULL) {
1928                 /* Insist on good event counter while assembling, except for
1929                  * spares (which don't need an event count) */
1930                 ++ev1;
1931                 if (rdev->desc_nr >= 0 &&
1932                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1933                     (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1934                      le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1935                         if (ev1 < mddev->events)
1936                                 return -EINVAL;
1937         } else if (mddev->bitmap) {
1938                 /* If adding to array with a bitmap, then we can accept an
1939                  * older device, but not too old.
1940                  */
1941                 if (ev1 < mddev->bitmap->events_cleared)
1942                         return 0;
1943                 if (ev1 < mddev->events)
1944                         set_bit(Bitmap_sync, &rdev->flags);
1945         } else {
1946                 if (ev1 < mddev->events)
1947                         /* just a hot-add of a new device, leave raid_disk at -1 */
1948                         return 0;
1949         }
1950         if (mddev->level != LEVEL_MULTIPATH) {
1951                 int role;
1952                 if (rdev->desc_nr < 0 ||
1953                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1954                         role = MD_DISK_ROLE_SPARE;
1955                         rdev->desc_nr = -1;
1956                 } else
1957                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1958                 switch(role) {
1959                 case MD_DISK_ROLE_SPARE: /* spare */
1960                         break;
1961                 case MD_DISK_ROLE_FAULTY: /* faulty */
1962                         set_bit(Faulty, &rdev->flags);
1963                         break;
1964                 case MD_DISK_ROLE_JOURNAL: /* journal device */
1965                         if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1966                                 /* journal device without journal feature */
1967                                 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
1968                                 return -EINVAL;
1969                         }
1970                         set_bit(Journal, &rdev->flags);
1971                         rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1972                         rdev->raid_disk = 0;
1973                         break;
1974                 default:
1975                         rdev->saved_raid_disk = role;
1976                         if ((le32_to_cpu(sb->feature_map) &
1977                              MD_FEATURE_RECOVERY_OFFSET)) {
1978                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1979                                 if (!(le32_to_cpu(sb->feature_map) &
1980                                       MD_FEATURE_RECOVERY_BITMAP))
1981                                         rdev->saved_raid_disk = -1;
1982                         } else {
1983                                 /*
1984                                  * If the array is FROZEN, then the device can't
1985                                  * be in_sync with rest of array.
1986                                  */
1987                                 if (!test_bit(MD_RECOVERY_FROZEN,
1988                                               &mddev->recovery))
1989                                         set_bit(In_sync, &rdev->flags);
1990                         }
1991                         rdev->raid_disk = role;
1992                         break;
1993                 }
1994                 if (sb->devflags & WriteMostly1)
1995                         set_bit(WriteMostly, &rdev->flags);
1996                 if (sb->devflags & FailFast1)
1997                         set_bit(FailFast, &rdev->flags);
1998                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1999                         set_bit(Replacement, &rdev->flags);
2000         } else /* MULTIPATH devices are always in sync */
2001                 set_bit(In_sync, &rdev->flags);
2002
2003         return 0;
2004 }
2005
2006 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
2007 {
2008         struct mdp_superblock_1 *sb;
2009         struct md_rdev *rdev2;
2010         int max_dev, i;
2011         /* make rdev->sb match mddev and rdev data. */
2012
2013         sb = page_address(rdev->sb_page);
2014
2015         sb->feature_map = 0;
2016         sb->pad0 = 0;
2017         sb->recovery_offset = cpu_to_le64(0);
2018         memset(sb->pad3, 0, sizeof(sb->pad3));
2019
2020         sb->utime = cpu_to_le64((__u64)mddev->utime);
2021         sb->events = cpu_to_le64(mddev->events);
2022         if (mddev->in_sync)
2023                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
2024         else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
2025                 sb->resync_offset = cpu_to_le64(MaxSector);
2026         else
2027                 sb->resync_offset = cpu_to_le64(0);
2028
2029         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
2030
2031         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
2032         sb->size = cpu_to_le64(mddev->dev_sectors);
2033         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
2034         sb->level = cpu_to_le32(mddev->level);
2035         sb->layout = cpu_to_le32(mddev->layout);
2036         if (test_bit(FailFast, &rdev->flags))
2037                 sb->devflags |= FailFast1;
2038         else
2039                 sb->devflags &= ~FailFast1;
2040
2041         if (test_bit(WriteMostly, &rdev->flags))
2042                 sb->devflags |= WriteMostly1;
2043         else
2044                 sb->devflags &= ~WriteMostly1;
2045         sb->data_offset = cpu_to_le64(rdev->data_offset);
2046         sb->data_size = cpu_to_le64(rdev->sectors);
2047
2048         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
2049                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
2050                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
2051         }
2052
2053         if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
2054             !test_bit(In_sync, &rdev->flags)) {
2055                 sb->feature_map |=
2056                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
2057                 sb->recovery_offset =
2058                         cpu_to_le64(rdev->recovery_offset);
2059                 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2060                         sb->feature_map |=
2061                                 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
2062         }
2063         /* Note: recovery_offset and journal_tail share space  */
2064         if (test_bit(Journal, &rdev->flags))
2065                 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
2066         if (test_bit(Replacement, &rdev->flags))
2067                 sb->feature_map |=
2068                         cpu_to_le32(MD_FEATURE_REPLACEMENT);
2069
2070         if (mddev->reshape_position != MaxSector) {
2071                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2072                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2073                 sb->new_layout = cpu_to_le32(mddev->new_layout);
2074                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2075                 sb->new_level = cpu_to_le32(mddev->new_level);
2076                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
2077                 if (mddev->delta_disks == 0 &&
2078                     mddev->reshape_backwards)
2079                         sb->feature_map
2080                                 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
2081                 if (rdev->new_data_offset != rdev->data_offset) {
2082                         sb->feature_map
2083                                 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2084                         sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2085                                                              - rdev->data_offset));
2086                 }
2087         }
2088
2089         if (mddev_is_clustered(mddev))
2090                 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2091
2092         if (rdev->badblocks.count == 0)
2093                 /* Nothing to do for bad blocks*/ ;
2094         else if (sb->bblog_offset == 0)
2095                 /* Cannot record bad blocks on this device */
2096                 md_error(mddev, rdev);
2097         else {
2098                 struct badblocks *bb = &rdev->badblocks;
2099                 __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
2100                 u64 *p = bb->page;
2101                 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
2102                 if (bb->changed) {
2103                         unsigned seq;
2104
2105 retry:
2106                         seq = read_seqbegin(&bb->lock);
2107
2108                         memset(bbp, 0xff, PAGE_SIZE);
2109
2110                         for (i = 0 ; i < bb->count ; i++) {
2111                                 u64 internal_bb = p[i];
2112                                 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2113                                                 | BB_LEN(internal_bb));
2114                                 bbp[i] = cpu_to_le64(store_bb);
2115                         }
2116                         bb->changed = 0;
2117                         if (read_seqretry(&bb->lock, seq))
2118                                 goto retry;
2119
2120                         bb->sector = (rdev->sb_start +
2121                                       (int)le32_to_cpu(sb->bblog_offset));
2122                         bb->size = le16_to_cpu(sb->bblog_size);
2123                 }
2124         }
2125
2126         max_dev = 0;
2127         rdev_for_each(rdev2, mddev)
2128                 if (rdev2->desc_nr+1 > max_dev)
2129                         max_dev = rdev2->desc_nr+1;
2130
2131         if (max_dev > le32_to_cpu(sb->max_dev)) {
2132                 int bmask;
2133                 sb->max_dev = cpu_to_le32(max_dev);
2134                 rdev->sb_size = max_dev * 2 + 256;
2135                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2136                 if (rdev->sb_size & bmask)
2137                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
2138         } else
2139                 max_dev = le32_to_cpu(sb->max_dev);
2140
2141         for (i=0; i<max_dev;i++)
2142                 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2143
2144         if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2145                 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
2146
2147         if (test_bit(MD_HAS_PPL, &mddev->flags)) {
2148                 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2149                         sb->feature_map |=
2150                             cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2151                 else
2152                         sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
2153                 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2154                 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2155         }
2156
2157         rdev_for_each(rdev2, mddev) {
2158                 i = rdev2->desc_nr;
2159                 if (test_bit(Faulty, &rdev2->flags))
2160                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
2161                 else if (test_bit(In_sync, &rdev2->flags))
2162                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2163                 else if (test_bit(Journal, &rdev2->flags))
2164                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
2165                 else if (rdev2->raid_disk >= 0)
2166                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2167                 else
2168                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2169         }
2170
2171         sb->sb_csum = calc_sb_1_csum(sb);
2172 }
2173
2174 static sector_t super_1_choose_bm_space(sector_t dev_size)
2175 {
2176         sector_t bm_space;
2177
2178         /* if the device is bigger than 8Gig, save 64k for bitmap
2179          * usage; if bigger than 200Gig, save 128k
2180          */
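        /*
         * Illustrative note (editor's addition, not in the original source):
         * all values are in 512-byte sectors, so 64*2 is 64K, 128*2 is 128K,
         * 8*1024*1024*2 is 8GiB and 200*1024*1024*2 is 200GiB; e.g. a
         * hypothetical 250GiB component reserves 128*2 = 256 sectors.
         */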
2181         if (dev_size < 64*2)
2182                 bm_space = 0;
2183         else if (dev_size - 64*2 >= 200*1024*1024*2)
2184                 bm_space = 128*2;
2185         else if (dev_size - 4*2 > 8*1024*1024*2)
2186                 bm_space = 64*2;
2187         else
2188                 bm_space = 4*2;
2189         return bm_space;
2190 }
2191
2192 static unsigned long long
2193 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
2194 {
2195         struct mdp_superblock_1 *sb;
2196         sector_t max_sectors;
2197         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
2198                 return 0; /* component must fit device */
2199         if (rdev->data_offset != rdev->new_data_offset)
2200                 return 0; /* too confusing */
2201         if (rdev->sb_start < rdev->data_offset) {
2202                 /* minor versions 1 and 2; superblock before data */
2203                 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
2204                 max_sectors -= rdev->data_offset;
2205                 if (!num_sectors || num_sectors > max_sectors)
2206                         num_sectors = max_sectors;
2207         } else if (rdev->mddev->bitmap_info.offset) {
2208                 /* minor version 0 with bitmap we can't move */
2209                 return 0;
2210         } else {
2211                 /* minor version 0; superblock after data */
2212                 sector_t sb_start, bm_space;
2213                 sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
2214
2215                 /* 8K is for superblock */
2216                 sb_start = dev_size - 8*2;
2217                 sb_start &= ~(sector_t)(4*2 - 1);
2218
2219                 bm_space = super_1_choose_bm_space(dev_size);
2220
2221         /* Space that can be used to store data needs to leave room for
2222          * the superblock, the bitmap and the bad block log (4K)
2223                  */
2224                 max_sectors = sb_start - bm_space - 4*2;
2225
2226                 if (!num_sectors || num_sectors > max_sectors)
2227                         num_sectors = max_sectors;
2228                 rdev->sb_start = sb_start;
2229         }
2230         sb = page_address(rdev->sb_page);
2231         sb->data_size = cpu_to_le64(num_sectors);
2232         sb->super_offset = cpu_to_le64(rdev->sb_start);
2233         sb->sb_csum = calc_sb_1_csum(sb);
2234         do {
2235                 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2236                                rdev->sb_page);
2237         } while (md_super_wait(rdev->mddev) < 0);
2238         return num_sectors;
2239
2240 }
2241
2242 static int
2243 super_1_allow_new_offset(struct md_rdev *rdev,
2244                          unsigned long long new_offset)
2245 {
2246         /* All necessary checks on new >= old have been done */
2247         struct bitmap *bitmap;
2248         if (new_offset >= rdev->data_offset)
2249                 return 1;
2250
2251         /* with 1.0 metadata, there is no metadata to tread on
2252          * so we can always move back */
2253         if (rdev->mddev->minor_version == 0)
2254                 return 1;
2255
2256         /* otherwise we must be sure not to step on
2257          * any metadata, so stay:
2258          * 36K beyond start of superblock
2259          * beyond end of badblocks
2260          * beyond write-intent bitmap
2261          */
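        /*
         * Editor's note (not in the original source): (32 + 4) * 2 below is
         * 72 sectors, i.e. the 36K mentioned above.
         */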
2262         if (rdev->sb_start + (32+4)*2 > new_offset)
2263                 return 0;
2264         bitmap = rdev->mddev->bitmap;
2265         if (bitmap && !rdev->mddev->bitmap_info.file &&
2266             rdev->sb_start + rdev->mddev->bitmap_info.offset +
2267             bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
2268                 return 0;
2269         if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2270                 return 0;
2271
2272         return 1;
2273 }
2274
2275 static struct super_type super_types[] = {
2276         [0] = {
2277                 .name   = "0.90.0",
2278                 .owner  = THIS_MODULE,
2279                 .load_super         = super_90_load,
2280                 .validate_super     = super_90_validate,
2281                 .sync_super         = super_90_sync,
2282                 .rdev_size_change   = super_90_rdev_size_change,
2283                 .allow_new_offset   = super_90_allow_new_offset,
2284         },
2285         [1] = {
2286                 .name   = "md-1",
2287                 .owner  = THIS_MODULE,
2288                 .load_super         = super_1_load,
2289                 .validate_super     = super_1_validate,
2290                 .sync_super         = super_1_sync,
2291                 .rdev_size_change   = super_1_rdev_size_change,
2292                 .allow_new_offset   = super_1_allow_new_offset,
2293         },
2294 };
2295
2296 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
2297 {
2298         if (mddev->sync_super) {
2299                 mddev->sync_super(mddev, rdev);
2300                 return;
2301         }
2302
2303         BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2304
2305         super_types[mddev->major_version].sync_super(mddev, rdev);
2306 }
2307
2308 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
2309 {
2310         struct md_rdev *rdev, *rdev2;
2311
2312         rcu_read_lock();
2313         rdev_for_each_rcu(rdev, mddev1) {
2314                 if (test_bit(Faulty, &rdev->flags) ||
2315                     test_bit(Journal, &rdev->flags) ||
2316                     rdev->raid_disk == -1)
2317                         continue;
2318                 rdev_for_each_rcu(rdev2, mddev2) {
2319                         if (test_bit(Faulty, &rdev2->flags) ||
2320                             test_bit(Journal, &rdev2->flags) ||
2321                             rdev2->raid_disk == -1)
2322                                 continue;
2323                         if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
2324                                 rcu_read_unlock();
2325                                 return 1;
2326                         }
2327                 }
2328         }
2329         rcu_read_unlock();
2330         return 0;
2331 }
2332
2333 static LIST_HEAD(pending_raid_disks);
2334
2335 /*
2336  * Try to register data integrity profile for an mddev
2337  *
2338  * This is called when an array is started and after a disk has been kicked
2339  * from the array. It only succeeds if all working and active component devices
2340  * are integrity capable with matching profiles.
2341  */
2342 int md_integrity_register(struct mddev *mddev)
2343 {
2344         struct md_rdev *rdev, *reference = NULL;
2345
2346         if (list_empty(&mddev->disks))
2347                 return 0; /* nothing to do */
2348         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2349                 return 0; /* shouldn't register, or already is */
2350         rdev_for_each(rdev, mddev) {
2351                 /* skip spares and non-functional disks */
2352                 if (test_bit(Faulty, &rdev->flags))
2353                         continue;
2354                 if (rdev->raid_disk < 0)
2355                         continue;
2356                 if (!reference) {
2357                         /* Use the first rdev as the reference */
2358                         reference = rdev;
2359                         continue;
2360                 }
2361                 /* does this rdev's profile match the reference profile? */
2362                 if (blk_integrity_compare(reference->bdev->bd_disk,
2363                                 rdev->bdev->bd_disk) < 0)
2364                         return -EINVAL;
2365         }
2366         if (!reference || !bdev_get_integrity(reference->bdev))
2367                 return 0;
2368         /*
2369          * All component devices are integrity capable and have matching
2370          * profiles, register the common profile for the md device.
2371          */
2372         blk_integrity_register(mddev->gendisk,
2373                                bdev_get_integrity(reference->bdev));
2374
2375         pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2376         if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) {
2377                 pr_err("md: failed to create integrity pool for %s\n",
2378                        mdname(mddev));
2379                 return -EINVAL;
2380         }
2381         return 0;
2382 }
2383 EXPORT_SYMBOL(md_integrity_register);
2384
2385 /*
2386  * Attempt to add an rdev, but only if it is consistent with the current
2387  * integrity profile
2388  */
2389 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2390 {
2391         struct blk_integrity *bi_mddev;
2392         char name[BDEVNAME_SIZE];
2393
2394         if (!mddev->gendisk)
2395                 return 0;
2396
2397         bi_mddev = blk_get_integrity(mddev->gendisk);
2398
2399         if (!bi_mddev) /* nothing to do */
2400                 return 0;
2401
2402         if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2403                 pr_err("%s: incompatible integrity profile for %s\n",
2404                        mdname(mddev), bdevname(rdev->bdev, name));
2405                 return -ENXIO;
2406         }
2407
2408         return 0;
2409 }
2410 EXPORT_SYMBOL(md_integrity_add_rdev);
2411
2412 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2413 {
2414         char b[BDEVNAME_SIZE];
2415         struct kobject *ko;
2416         int err;
2417
2418         /* prevent duplicates */
2419         if (find_rdev(mddev, rdev->bdev->bd_dev))
2420                 return -EEXIST;
2421
2422         if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
2423             mddev->pers)
2424                 return -EROFS;
2425
2426         /* make sure rdev->sectors exceeds mddev->dev_sectors */
2427         if (!test_bit(Journal, &rdev->flags) &&
2428             rdev->sectors &&
2429             (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2430                 if (mddev->pers) {
2431                         /* Cannot change size, so fail.
2432                          * If mddev->level <= 0, then we don't care
2433                          * about aligning sizes (e.g. linear)
2434                          */
2435                         if (mddev->level > 0)
2436                                 return -ENOSPC;
2437                 } else
2438                         mddev->dev_sectors = rdev->sectors;
2439         }
2440
2441         /* Verify rdev->desc_nr is unique.
2442          * If it is -1, assign a free number, else
2443          * check number is not in use
2444          */
2445         rcu_read_lock();
2446         if (rdev->desc_nr < 0) {
2447                 int choice = 0;
2448                 if (mddev->pers)
2449                         choice = mddev->raid_disks;
2450                 while (md_find_rdev_nr_rcu(mddev, choice))
2451                         choice++;
2452                 rdev->desc_nr = choice;
2453         } else {
2454                 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2455                         rcu_read_unlock();
2456                         return -EBUSY;
2457                 }
2458         }
2459         rcu_read_unlock();
2460         if (!test_bit(Journal, &rdev->flags) &&
2461             mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2462                 pr_warn("md: %s: array is limited to %d devices\n",
2463                         mdname(mddev), mddev->max_disks);
2464                 return -EBUSY;
2465         }
2466         bdevname(rdev->bdev,b);
2467         strreplace(b, '/', '!');
2468
2469         rdev->mddev = mddev;
2470         pr_debug("md: bind<%s>\n", b);
2471
2472         if (mddev->raid_disks)
2473                 mddev_create_serial_pool(mddev, rdev, false);
2474
2475         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2476                 goto fail;
2477
2478         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2479         /* failure here is OK */
2480         err = sysfs_create_link(&rdev->kobj, ko, "block");
2481         rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2482         rdev->sysfs_unack_badblocks =
2483                 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2484         rdev->sysfs_badblocks =
2485                 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
2486
2487         list_add_rcu(&rdev->same_set, &mddev->disks);
2488         bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2489
2490         /* May as well allow recovery to be retried once */
2491         mddev->recovery_disabled++;
2492
2493         return 0;
2494
2495  fail:
2496         pr_warn("md: failed to register dev-%s for %s\n",
2497                 b, mdname(mddev));
2498         return err;
2499 }
2500
2501 static void rdev_delayed_delete(struct work_struct *ws)
2502 {
2503         struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2504         kobject_del(&rdev->kobj);
2505         kobject_put(&rdev->kobj);
2506 }
2507
2508 static void unbind_rdev_from_array(struct md_rdev *rdev)
2509 {
2510         char b[BDEVNAME_SIZE];
2511
2512         bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2513         list_del_rcu(&rdev->same_set);
2514         pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
2515         mddev_destroy_serial_pool(rdev->mddev, rdev, false);
2516         rdev->mddev = NULL;
2517         sysfs_remove_link(&rdev->kobj, "block");
2518         sysfs_put(rdev->sysfs_state);
2519         sysfs_put(rdev->sysfs_unack_badblocks);
2520         sysfs_put(rdev->sysfs_badblocks);
2521         rdev->sysfs_state = NULL;
2522         rdev->sysfs_unack_badblocks = NULL;
2523         rdev->sysfs_badblocks = NULL;
2524         rdev->badblocks.count = 0;
2525         /* We need to delay this, otherwise we can deadlock when
2526          * writing 'remove' to "dev/state".  We also need
2527          * to delay it due to rcu usage.
2528          */
2529         synchronize_rcu();
2530         INIT_WORK(&rdev->del_work, rdev_delayed_delete);
2531         kobject_get(&rdev->kobj);
2532         queue_work(md_rdev_misc_wq, &rdev->del_work);
2533 }
2534
2535 /*
2536  * prevent the device from being mounted, repartitioned or
2537  * otherwise reused by a RAID array (or any other kernel
2538  * subsystem), by bd_claiming the device.
2539  */
2540 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2541 {
2542         int err = 0;
2543         struct block_device *bdev;
2544
2545         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2546                                  shared ? (struct md_rdev *)lock_rdev : rdev);
2547         if (IS_ERR(bdev)) {
2548                 pr_warn("md: could not open device unknown-block(%u,%u).\n",
2549                         MAJOR(dev), MINOR(dev));
2550                 return PTR_ERR(bdev);
2551         }
2552         rdev->bdev = bdev;
2553         return err;
2554 }
2555
2556 static void unlock_rdev(struct md_rdev *rdev)
2557 {
2558         struct block_device *bdev = rdev->bdev;
2559         rdev->bdev = NULL;
2560         blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2561 }
2562
2563 void md_autodetect_dev(dev_t dev);
2564
2565 static void export_rdev(struct md_rdev *rdev)
2566 {
2567         char b[BDEVNAME_SIZE];
2568
2569         pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
2570         md_rdev_clear(rdev);
2571 #ifndef MODULE
2572         if (test_bit(AutoDetected, &rdev->flags))
2573                 md_autodetect_dev(rdev->bdev->bd_dev);
2574 #endif
2575         unlock_rdev(rdev);
2576         kobject_put(&rdev->kobj);
2577 }
2578
2579 void md_kick_rdev_from_array(struct md_rdev *rdev)
2580 {
2581         unbind_rdev_from_array(rdev);
2582         export_rdev(rdev);
2583 }
2584 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
2585
2586 static void export_array(struct mddev *mddev)
2587 {
2588         struct md_rdev *rdev;
2589
2590         while (!list_empty(&mddev->disks)) {
2591                 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2592                                         same_set);
2593                 md_kick_rdev_from_array(rdev);
2594         }
2595         mddev->raid_disks = 0;
2596         mddev->major_version = 0;
2597 }
2598
2599 static bool set_in_sync(struct mddev *mddev)
2600 {
2601         lockdep_assert_held(&mddev->lock);
2602         if (!mddev->in_sync) {
2603                 mddev->sync_checkers++;
2604                 spin_unlock(&mddev->lock);
2605                 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2606                 spin_lock(&mddev->lock);
2607                 if (!mddev->in_sync &&
2608                     percpu_ref_is_zero(&mddev->writes_pending)) {
2609                         mddev->in_sync = 1;
2610                         /*
2611                          * Ensure ->in_sync is visible before we clear
2612                          * ->sync_checkers.
2613                          */
2614                         smp_mb();
2615                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2616                         sysfs_notify_dirent_safe(mddev->sysfs_state);
2617                 }
2618                 if (--mddev->sync_checkers == 0)
2619                         percpu_ref_switch_to_percpu(&mddev->writes_pending);
2620         }
2621         if (mddev->safemode == 1)
2622                 mddev->safemode = 0;
2623         return mddev->in_sync;
2624 }
2625
2626 static void sync_sbs(struct mddev *mddev, int nospares)
2627 {
2628         /* Update each superblock (in-memory image), but
2629          * if we are allowed to, skip spares which already
2630          * have the right event counter, or have one earlier
2631          * (which would mean they aren't being marked as dirty
2632          * with the rest of the array)
2633          */
2634         struct md_rdev *rdev;
2635         rdev_for_each(rdev, mddev) {
2636                 if (rdev->sb_events == mddev->events ||
2637                     (nospares &&
2638                      rdev->raid_disk < 0 &&
2639                      rdev->sb_events+1 == mddev->events)) {
2640                         /* Don't update this superblock */
2641                         rdev->sb_loaded = 2;
2642                 } else {
2643                         sync_super(mddev, rdev);
2644                         rdev->sb_loaded = 1;
2645                 }
2646         }
2647 }
2648
2649 static bool does_sb_need_changing(struct mddev *mddev)
2650 {
2651         struct md_rdev *rdev = NULL, *iter;
2652         struct mdp_superblock_1 *sb;
2653         int role;
2654
2655         /* Find a good rdev */
2656         rdev_for_each(iter, mddev)
2657                 if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
2658                         rdev = iter;
2659                         break;
2660                 }
2661
2662         /* No good device found. */
2663         if (!rdev)
2664                 return false;
2665
2666         sb = page_address(rdev->sb_page);
2667         /* Check if a device has become faulty or a spare become active */
2668         rdev_for_each(rdev, mddev) {
2669                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2670                 /* Device activated? */
2671                 if (role == 0xffff && rdev->raid_disk >=0 &&
2672                     !test_bit(Faulty, &rdev->flags))
2673                         return true;
2674                 /* Device turned faulty? */
2675                 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2676                         return true;
2677         }
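        /*
         * Editor's note (not in the original source): 0xffff above is the
         * on-disk value of MD_DISK_ROLE_SPARE and 0xfffd is
         * MD_DISK_ROLE_JOURNAL, so a role below 0xfffd marks a slot that was
         * recorded as active; the loop thus catches a spare that became
         * active and an active slot that has since turned faulty.
         */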
2678
2679         /* Check if any mddev parameters have changed */
2680         if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2681             (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2682             (mddev->layout != le32_to_cpu(sb->layout)) ||
2683             (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2684             (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2685                 return true;
2686
2687         return false;
2688 }
2689
2690 void md_update_sb(struct mddev *mddev, int force_change)
2691 {
2692         struct md_rdev *rdev;
2693         int sync_req;
2694         int nospares = 0;
2695         int any_badblocks_changed = 0;
2696         int ret = -1;
2697
2698         if (mddev->ro) {
2699                 if (force_change)
2700                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2701                 return;
2702         }
2703
2704 repeat:
2705         if (mddev_is_clustered(mddev)) {
2706                 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2707                         force_change = 1;
2708                 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2709                         nospares = 1;
2710                 ret = md_cluster_ops->metadata_update_start(mddev);
2711                 /* Has someone else updated the sb? */
2712                 if (!does_sb_need_changing(mddev)) {
2713                         if (ret == 0)
2714                                 md_cluster_ops->metadata_update_cancel(mddev);
2715                         bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2716                                                          BIT(MD_SB_CHANGE_DEVS) |
2717                                                          BIT(MD_SB_CHANGE_CLEAN));
2718                         return;
2719                 }
2720         }
2721
2722         /*
2723          * First make sure individual recovery_offsets are correct.
2724          * curr_resync_completed can only be used during recovery.
2725          * During reshape/resync it might use array-addresses rather
2726          * than device addresses.
2727          */
2728         rdev_for_each(rdev, mddev) {
2729                 if (rdev->raid_disk >= 0 &&
2730                     mddev->delta_disks >= 0 &&
2731                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2732                     test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2733                     !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2734                     !test_bit(Journal, &rdev->flags) &&
2735                     !test_bit(In_sync, &rdev->flags) &&
2736                     mddev->curr_resync_completed > rdev->recovery_offset)
2737                                 rdev->recovery_offset = mddev->curr_resync_completed;
2738
2739         }
2740         if (!mddev->persistent) {
2741                 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2742                 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2743                 if (!mddev->external) {
2744                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2745                         rdev_for_each(rdev, mddev) {
2746                                 if (rdev->badblocks.changed) {
2747                                         rdev->badblocks.changed = 0;
2748                                         ack_all_badblocks(&rdev->badblocks);
2749                                         md_error(mddev, rdev);
2750                                 }
2751                                 clear_bit(Blocked, &rdev->flags);
2752                                 clear_bit(BlockedBadBlocks, &rdev->flags);
2753                                 wake_up(&rdev->blocked_wait);
2754                         }
2755                 }
2756                 wake_up(&mddev->sb_wait);
2757                 return;
2758         }
2759
2760         spin_lock(&mddev->lock);
2761
2762         mddev->utime = ktime_get_real_seconds();
2763
2764         if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2765                 force_change = 1;
2766         if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2767                 /* just a clean <-> dirty transition; possibly leave spares alone,
2768                  * though if 'events' isn't the right even/odd, we will have to update
2769                  * the spares after all
2770                  */
2771                 nospares = 1;
2772         if (force_change)
2773                 nospares = 0;
2774         if (mddev->degraded)
2775                 /* If the array is degraded, then skipping spares is both
2776                  * dangerous and fairly pointless.
2777                  * Dangerous because a device that was removed from the array
2778                  * might have an event_count that still looks up-to-date,
2779                  * so it can be re-added without a resync.
2780                  * Pointless because if there are any spares to skip,
2781                  * then a recovery will happen and soon that array won't
2782                  * be degraded any more and the spare can go back to sleep then.
2783                  */
2784                 nospares = 0;
2785
2786         sync_req = mddev->in_sync;
2787
2788         /* If this is just a dirty<->clean transition, and the array is clean
2789          * and 'events' is odd, we can roll back to the previous clean state */
2790         if (nospares
2791             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2792             && mddev->can_decrease_events
2793             && mddev->events != 1) {
2794                 mddev->events--;
2795                 mddev->can_decrease_events = 0;
2796         } else {
2797                 /* otherwise we have to go forward and ... */
2798                 mddev->events ++;
2799                 mddev->can_decrease_events = nospares;
2800         }
2801
2802         /*
2803          * This 64-bit counter should never wrap.
2804          * Either we are in around ~1 trillion A.C., assuming
2805          * 1 reboot per second, or we have a bug...
2806          */
2807         WARN_ON(mddev->events == 0);
2808
2809         rdev_for_each(rdev, mddev) {
2810                 if (rdev->badblocks.changed)
2811                         any_badblocks_changed++;
2812                 if (test_bit(Faulty, &rdev->flags))
2813                         set_bit(FaultRecorded, &rdev->flags);
2814         }
2815
2816         sync_sbs(mddev, nospares);
2817         spin_unlock(&mddev->lock);
2818
2819         pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2820                  mdname(mddev), mddev->in_sync);
2821
2822         if (mddev->queue)
2823                 blk_add_trace_msg(mddev->queue, "md md_update_sb");
2824 rewrite:
2825         md_bitmap_update_sb(mddev->bitmap);
2826         rdev_for_each(rdev, mddev) {
2827                 char b[BDEVNAME_SIZE];
2828
2829                 if (rdev->sb_loaded != 1)
2830                         continue; /* no noise on spare devices */
2831
2832                 if (!test_bit(Faulty, &rdev->flags)) {
2833                         md_super_write(mddev,rdev,
2834                                        rdev->sb_start, rdev->sb_size,
2835                                        rdev->sb_page);
2836                         pr_debug("md: (write) %s's sb offset: %llu\n",
2837                                  bdevname(rdev->bdev, b),
2838                                  (unsigned long long)rdev->sb_start);
2839                         rdev->sb_events = mddev->events;
2840                         if (rdev->badblocks.size) {
2841                                 md_super_write(mddev, rdev,
2842                                                rdev->badblocks.sector,
2843                                                rdev->badblocks.size << 9,
2844                                                rdev->bb_page);
2845                                 rdev->badblocks.size = 0;
2846                         }
2847
2848                 } else
2849                         pr_debug("md: %s (skipping faulty)\n",
2850                                  bdevname(rdev->bdev, b));
2851
2852                 if (mddev->level == LEVEL_MULTIPATH)
2853                         /* only need to write one superblock... */
2854                         break;
2855         }
2856         if (md_super_wait(mddev) < 0)
2857                 goto rewrite;
2858         /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
2859
2860         if (mddev_is_clustered(mddev) && ret == 0)
2861                 md_cluster_ops->metadata_update_finish(mddev);
2862
2863         if (mddev->in_sync != sync_req ||
2864             !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2865                                BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
2866                 /* have to write it out again */
2867                 goto repeat;
2868         wake_up(&mddev->sb_wait);
2869         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2870                 sysfs_notify_dirent_safe(mddev->sysfs_completed);
2871
2872         rdev_for_each(rdev, mddev) {
2873                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2874                         clear_bit(Blocked, &rdev->flags);
2875
2876                 if (any_badblocks_changed)
2877                         ack_all_badblocks(&rdev->badblocks);
2878                 clear_bit(BlockedBadBlocks, &rdev->flags);
2879                 wake_up(&rdev->blocked_wait);
2880         }
2881 }
2882 EXPORT_SYMBOL(md_update_sb);
2883
2884 static int add_bound_rdev(struct md_rdev *rdev)
2885 {
2886         struct mddev *mddev = rdev->mddev;
2887         int err = 0;
2888         bool add_journal = test_bit(Journal, &rdev->flags);
2889
2890         if (!mddev->pers->hot_remove_disk || add_journal) {
2891                 /* If there is hot_add_disk but no hot_remove_disk
2892                  * then added disks are for geometry changes,
2893                  * and should be added immediately.
2894                  */
2895                 super_types[mddev->major_version].
2896                         validate_super(mddev, rdev);
2897                 if (add_journal)
2898                         mddev_suspend(mddev);
2899                 err = mddev->pers->hot_add_disk(mddev, rdev);
2900                 if (add_journal)
2901                         mddev_resume(mddev);
2902                 if (err) {
2903                         md_kick_rdev_from_array(rdev);
2904                         return err;
2905                 }
2906         }
2907         sysfs_notify_dirent_safe(rdev->sysfs_state);
2908
2909         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2910         if (mddev->degraded)
2911                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2912         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2913         md_new_event(mddev);
2914         md_wakeup_thread(mddev->thread);
2915         return 0;
2916 }
2917
2918 /* words written to sysfs files may, or may not, be \n terminated.
2919  * We want to accept either case. For this we use cmd_match.
2920  */
2921 static int cmd_match(const char *cmd, const char *str)
2922 {
2923         /* See if cmd, written into a sysfs file, matches
2924          * str.  They must either be the same, or cmd can
2925          * have a trailing newline
2926          */
2927         while (*cmd && *str && *cmd == *str) {
2928                 cmd++;
2929                 str++;
2930         }
2931         if (*cmd == '\n')
2932                 cmd++;
2933         if (*str || *cmd)
2934                 return 0;
2935         return 1;
2936 }
2937
2938 struct rdev_sysfs_entry {
2939         struct attribute attr;
2940         ssize_t (*show)(struct md_rdev *, char *);
2941         ssize_t (*store)(struct md_rdev *, const char *, size_t);
2942 };
2943
2944 static ssize_t
2945 state_show(struct md_rdev *rdev, char *page)
2946 {
2947         char *sep = ",";
2948         size_t len = 0;
2949         unsigned long flags = READ_ONCE(rdev->flags);
2950
2951         if (test_bit(Faulty, &flags) ||
2952             (!test_bit(ExternalBbl, &flags) &&
2953             rdev->badblocks.unacked_exist))
2954                 len += sprintf(page+len, "faulty%s", sep);
2955         if (test_bit(In_sync, &flags))
2956                 len += sprintf(page+len, "in_sync%s", sep);
2957         if (test_bit(Journal, &flags))
2958                 len += sprintf(page+len, "journal%s", sep);
2959         if (test_bit(WriteMostly, &flags))
2960                 len += sprintf(page+len, "write_mostly%s", sep);
2961         if (test_bit(Blocked, &flags) ||
2962             (rdev->badblocks.unacked_exist
2963              && !test_bit(Faulty, &flags)))
2964                 len += sprintf(page+len, "blocked%s", sep);
2965         if (!test_bit(Faulty, &flags) &&
2966             !test_bit(Journal, &flags) &&
2967             !test_bit(In_sync, &flags))
2968                 len += sprintf(page+len, "spare%s", sep);
2969         if (test_bit(WriteErrorSeen, &flags))
2970                 len += sprintf(page+len, "write_error%s", sep);
2971         if (test_bit(WantReplacement, &flags))
2972                 len += sprintf(page+len, "want_replacement%s", sep);
2973         if (test_bit(Replacement, &flags))
2974                 len += sprintf(page+len, "replacement%s", sep);
2975         if (test_bit(ExternalBbl, &flags))
2976                 len += sprintf(page+len, "external_bbl%s", sep);
2977         if (test_bit(FailFast, &flags))
2978                 len += sprintf(page+len, "failfast%s", sep);
2979
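             /* strip the trailing separator so the list does not end with a comma */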
2980         if (len)
2981                 len -= strlen(sep);
2982
2983         return len+sprintf(page+len, "\n");
2984 }
2985
2986 static ssize_t
2987 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2988 {
2989         /* can write
2990          *  faulty  - simulates an error
2991          *  remove  - disconnects the device
2992          *  writemostly - sets write_mostly
2993          *  -writemostly - clears write_mostly
2994          *  blocked - sets the Blocked flags
2995          *  -blocked - clears the Blocked and possibly simulates an error
2996          *  insync - sets Insync providing device isn't active
2997          *  -insync - clear Insync for a device with a slot assigned,
2998          *            so that it gets rebuilt based on bitmap
2999          *  write_error - sets WriteErrorSeen
3000          *  -write_error - clears WriteErrorSeen
3001          *  {,-}failfast - set/clear FailFast
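              *
              * For example (assuming an array md0 with member device sda1),
              * writing "failfast" to /sys/block/md0/md/dev-sda1/state sets
              * the FailFast flag on that member.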
3002          */
3003
3004         struct mddev *mddev = rdev->mddev;
3005         int err = -EINVAL;
3006         bool need_update_sb = false;
3007
3008         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
3009                 md_error(rdev->mddev, rdev);
3010                 if (test_bit(Faulty, &rdev->flags))
3011                         err = 0;
3012                 else
3013                         err = -EBUSY;
3014         } else if (cmd_match(buf, "remove")) {
3015                 if (rdev->mddev->pers) {
3016                         clear_bit(Blocked, &rdev->flags);
3017                         remove_and_add_spares(rdev->mddev, rdev);
3018                 }
3019                 if (rdev->raid_disk >= 0)
3020                         err = -EBUSY;
3021                 else {
3022                         err = 0;
3023                         if (mddev_is_clustered(mddev))
3024                                 err = md_cluster_ops->remove_disk(mddev, rdev);
3025
3026                         if (err == 0) {
3027                                 md_kick_rdev_from_array(rdev);
3028                                 if (mddev->pers) {
3029                                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
3030                                         md_wakeup_thread(mddev->thread);
3031                                 }
3032                                 md_new_event(mddev);
3033                         }
3034                 }
3035         } else if (cmd_match(buf, "writemostly")) {
3036                 set_bit(WriteMostly, &rdev->flags);
3037                 mddev_create_serial_pool(rdev->mddev, rdev, false);
3038                 need_update_sb = true;
3039                 err = 0;
3040         } else if (cmd_match(buf, "-writemostly")) {
3041                 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
3042                 clear_bit(WriteMostly, &rdev->flags);
3043                 need_update_sb = true;
3044                 err = 0;
3045         } else if (cmd_match(buf, "blocked")) {
3046                 set_bit(Blocked, &rdev->flags);
3047                 err = 0;
3048         } else if (cmd_match(buf, "-blocked")) {
3049                 if (!test_bit(Faulty, &rdev->flags) &&
3050                     !test_bit(ExternalBbl, &rdev->flags) &&
3051                     rdev->badblocks.unacked_exist) {
3052                         /* metadata handler doesn't understand badblocks,
3053                          * so we need to fail the device
3054                          */
3055                         md_error(rdev->mddev, rdev);
3056                 }
3057                 clear_bit(Blocked, &rdev->flags);
3058                 clear_bit(BlockedBadBlocks, &rdev->flags);
3059                 wake_up(&rdev->blocked_wait);
3060                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3061                 md_wakeup_thread(rdev->mddev->thread);
3062
3063                 err = 0;
3064         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3065                 set_bit(In_sync, &rdev->flags);
3066                 err = 0;
3067         } else if (cmd_match(buf, "failfast")) {
3068                 set_bit(FailFast, &rdev->flags);
3069                 need_update_sb = true;
3070                 err = 0;
3071         } else if (cmd_match(buf, "-failfast")) {
3072                 clear_bit(FailFast, &rdev->flags);
3073                 need_update_sb = true;
3074                 err = 0;
3075         } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3076                    !test_bit(Journal, &rdev->flags)) {
3077                 if (rdev->mddev->pers == NULL) {
3078                         clear_bit(In_sync, &rdev->flags);
3079                         rdev->saved_raid_disk = rdev->raid_disk;
3080                         rdev->raid_disk = -1;
3081                         err = 0;
3082                 }
3083         } else if (cmd_match(buf, "write_error")) {
3084                 set_bit(WriteErrorSeen, &rdev->flags);
3085                 err = 0;
3086         } else if (cmd_match(buf, "-write_error")) {
3087                 clear_bit(WriteErrorSeen, &rdev->flags);
3088                 err = 0;
3089         } else if (cmd_match(buf, "want_replacement")) {
3090                 /* Any non-spare device that is not a replacement can
3091                  * become want_replacement at any time, but we then need to
3092                  * check if recovery is needed.
3093                  */
3094                 if (rdev->raid_disk >= 0 &&
3095                     !test_bit(Journal, &rdev->flags) &&
3096                     !test_bit(Replacement, &rdev->flags))
3097                         set_bit(WantReplacement, &rdev->flags);
3098                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3099                 md_wakeup_thread(rdev->mddev->thread);
3100                 err = 0;
3101         } else if (cmd_match(buf, "-want_replacement")) {
3102                 /* Clearing 'want_replacement' is always allowed.
3103                  * Once replacement starts it is too late, though.
3104                  */
3105                 err = 0;
3106                 clear_bit(WantReplacement, &rdev->flags);
3107         } else if (cmd_match(buf, "replacement")) {
3108                 /* Can only set a device as a replacement when array has not
3109                  * yet been started.  Once running, replacement is automatic
3110                  * from spares, or by assigning 'slot'.
3111                  */
3112                 if (rdev->mddev->pers)
3113                         err = -EBUSY;
3114                 else {
3115                         set_bit(Replacement, &rdev->flags);
3116                         err = 0;
3117                 }
3118         } else if (cmd_match(buf, "-replacement")) {
3119                 /* Similarly, can only clear Replacement before start */
3120                 if (rdev->mddev->pers)
3121                         err = -EBUSY;
3122                 else {
3123                         clear_bit(Replacement, &rdev->flags);
3124                         err = 0;
3125                 }
3126         } else if (cmd_match(buf, "re-add")) {
3127                 if (!rdev->mddev->pers)
3128                         err = -EINVAL;
3129                 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3130                                 rdev->saved_raid_disk >= 0) {
3131                         /* clear_bit is performed _after_ all the devices
3132                          * have their local Faulty bit cleared. If any writes
3133                          * happen in the meantime in the local node, they
3134                          * will land in the local bitmap, which will be synced
3135                          * by this node eventually
3136                          */
3137                         if (!mddev_is_clustered(rdev->mddev) ||
3138                             (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3139                                 clear_bit(Faulty, &rdev->flags);
3140                                 err = add_bound_rdev(rdev);
3141                         }
3142                 } else
3143                         err = -EBUSY;
3144         } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3145                 set_bit(ExternalBbl, &rdev->flags);
3146                 rdev->badblocks.shift = 0;
3147                 err = 0;
3148         } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3149                 clear_bit(ExternalBbl, &rdev->flags);
3150                 err = 0;
3151         }
3152         if (need_update_sb)
3153                 md_update_sb(mddev, 1);
3154         if (!err)
3155                 sysfs_notify_dirent_safe(rdev->sysfs_state);
3156         return err ? err : len;
3157 }
3158 static struct rdev_sysfs_entry rdev_state =
3159 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
3160
3161 static ssize_t
3162 errors_show(struct md_rdev *rdev, char *page)
3163 {
3164         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3165 }
3166
3167 static ssize_t
3168 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
3169 {
3170         unsigned int n;
3171         int rv;
3172
3173         rv = kstrtouint(buf, 10, &n);
3174         if (rv < 0)
3175                 return rv;
3176         atomic_set(&rdev->corrected_errors, n);
3177         return len;
3178 }
3179 static struct rdev_sysfs_entry rdev_errors =
3180 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
3181
3182 static ssize_t
3183 slot_show(struct md_rdev *rdev, char *page)
3184 {
3185         if (test_bit(Journal, &rdev->flags))
3186                 return sprintf(page, "journal\n");
3187         else if (rdev->raid_disk < 0)
3188                 return sprintf(page, "none\n");
3189         else
3190                 return sprintf(page, "%d\n", rdev->raid_disk);
3191 }
3192
3193 static ssize_t
3194 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
3195 {
3196         int slot;
3197         int err;
3198
3199         if (test_bit(Journal, &rdev->flags))
3200                 return -EBUSY;
3201         if (strncmp(buf, "none", 4)==0)
3202                 slot = -1;
3203         else {
3204                 err = kstrtouint(buf, 10, (unsigned int *)&slot);
3205                 if (err < 0)
3206                         return err;
3207         }
3208         if (rdev->mddev->pers && slot == -1) {
3209                 /* Setting 'slot' on an active array requires also
3210                  * updating the 'rd%d' link, and communicating
3211                  * with the personality with ->hot_*_disk.
3212                  * For now we only support removing
3213                  * failed/spare devices.  This normally happens automatically,
3214                  * but not when the metadata is externally managed.
3215                  */
3216                 if (rdev->raid_disk == -1)
3217                         return -EEXIST;
3218                 /* personality does all needed checks */
3219                 if (rdev->mddev->pers->hot_remove_disk == NULL)
3220                         return -EINVAL;
3221                 clear_bit(Blocked, &rdev->flags);
3222                 remove_and_add_spares(rdev->mddev, rdev);
3223                 if (rdev->raid_disk >= 0)
3224                         return -EBUSY;
3225                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3226                 md_wakeup_thread(rdev->mddev->thread);
3227         } else if (rdev->mddev->pers) {
3228                 /* Activating a spare .. or possibly reactivating
3229                  * if we ever get bitmaps working here.
3230                  */
3231                 int err;
3232
3233                 if (rdev->raid_disk != -1)
3234                         return -EBUSY;
3235
3236                 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3237                         return -EBUSY;
3238
3239                 if (rdev->mddev->pers->hot_add_disk == NULL)
3240                         return -EINVAL;
3241
3242                 if (slot >= rdev->mddev->raid_disks &&
3243                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3244                         return -ENOSPC;
3245
3246                 rdev->raid_disk = slot;
3247                 if (test_bit(In_sync, &rdev->flags))
3248                         rdev->saved_raid_disk = slot;
3249                 else
3250                         rdev->saved_raid_disk = -1;
3251                 clear_bit(In_sync, &rdev->flags);
3252                 clear_bit(Bitmap_sync, &rdev->flags);
3253                 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
3254                 if (err) {
3255                         rdev->raid_disk = -1;
3256                         return err;
3257                 } else
3258                         sysfs_notify_dirent_safe(rdev->sysfs_state);
3259                 /* failure here is OK */;
3260                 sysfs_link_rdev(rdev->mddev, rdev);
3261                 /* don't wake up anyone, leave that to userspace. */
3262         } else {
3263                 if (slot >= rdev->mddev->raid_disks &&
3264                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3265                         return -ENOSPC;
3266                 rdev->raid_disk = slot;
3267                 /* assume it is working */
3268                 clear_bit(Faulty, &rdev->flags);
3269                 clear_bit(WriteMostly, &rdev->flags);
3270                 set_bit(In_sync, &rdev->flags);
3271                 sysfs_notify_dirent_safe(rdev->sysfs_state);
3272         }
3273         return len;
3274 }
3275
3276 static struct rdev_sysfs_entry rdev_slot =
3277 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
3278
3279 static ssize_t
3280 offset_show(struct md_rdev *rdev, char *page)
3281 {
3282         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
3283 }
3284
3285 static ssize_t
3286 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
3287 {
3288         unsigned long long offset;
3289         if (kstrtoull(buf, 10, &offset) < 0)
3290                 return -EINVAL;
3291         if (rdev->mddev->pers && rdev->raid_disk >= 0)
3292                 return -EBUSY;
3293         if (rdev->sectors && rdev->mddev->external)
3294                 /* Must set offset before size, so overlap checks
3295                  * can be sane */
3296                 return -EBUSY;
3297         rdev->data_offset = offset;
3298         rdev->new_data_offset = offset;
3299         return len;
3300 }
3301
3302 static struct rdev_sysfs_entry rdev_offset =
3303 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
3304
3305 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3306 {
3307         return sprintf(page, "%llu\n",
3308                        (unsigned long long)rdev->new_data_offset);
3309 }
3310
3311 static ssize_t new_offset_store(struct md_rdev *rdev,
3312                                 const char *buf, size_t len)
3313 {
3314         unsigned long long new_offset;
3315         struct mddev *mddev = rdev->mddev;
3316
3317         if (kstrtoull(buf, 10, &new_offset) < 0)
3318                 return -EINVAL;
3319
3320         if (mddev->sync_thread ||
3321             test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
3322                 return -EBUSY;
3323         if (new_offset == rdev->data_offset)
3324                 /* reset is always permitted */
3325                 ;
3326         else if (new_offset > rdev->data_offset) {
3327                 /* must not push array size beyond rdev_sectors */
3328                 if (new_offset - rdev->data_offset
3329                     + mddev->dev_sectors > rdev->sectors)
3330                                 return -E2BIG;
3331         }
3332         /* Metadata worries about other space details. */
3333
3334         /* decreasing the offset is inconsistent with a backwards
3335          * reshape.
3336          */
3337         if (new_offset < rdev->data_offset &&
3338             mddev->reshape_backwards)
3339                 return -EINVAL;
3340         /* Increasing offset is inconsistent with forwards
3341          * reshape.  reshape_direction should be set to
3342          * 'backwards' first.
3343          */
3344         if (new_offset > rdev->data_offset &&
3345             !mddev->reshape_backwards)
3346                 return -EINVAL;
3347
3348         if (mddev->pers && mddev->persistent &&
3349             !super_types[mddev->major_version]
3350             .allow_new_offset(rdev, new_offset))
3351                 return -E2BIG;
3352         rdev->new_data_offset = new_offset;
3353         if (new_offset > rdev->data_offset)
3354                 mddev->reshape_backwards = 1;
3355         else if (new_offset < rdev->data_offset)
3356                 mddev->reshape_backwards = 0;
3357
3358         return len;
3359 }
3360 static struct rdev_sysfs_entry rdev_new_offset =
3361 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3362
3363 static ssize_t
3364 rdev_size_show(struct md_rdev *rdev, char *page)
3365 {
3366         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
3367 }
3368
3369 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
3370 {
3371         /* check if two start/length pairs overlap */
3372         if (s1+l1 <= s2)
3373                 return 0;
3374         if (s2+l2 <= s1)
3375                 return 0;
3376         return 1;
3377 }
3378
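     /* Parse a count of 1K blocks from sysfs and convert it to 512-byte
      * sectors, rejecting values that would overflow the conversion or a
      * 32-bit sector_t.
      */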
3379 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3380 {
3381         unsigned long long blocks;
3382         sector_t new;
3383
3384         if (kstrtoull(buf, 10, &blocks) < 0)
3385                 return -EINVAL;
3386
3387         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3388                 return -EINVAL; /* sector conversion overflow */
3389
3390         new = blocks * 2;
3391         if (new != blocks * 2)
3392                 return -EINVAL; /* unsigned long long to sector_t overflow */
3393
3394         *sectors = new;
3395         return 0;
3396 }
3397
3398 static ssize_t
3399 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3400 {
3401         struct mddev *my_mddev = rdev->mddev;
3402         sector_t oldsectors = rdev->sectors;
3403         sector_t sectors;
3404
3405         if (test_bit(Journal, &rdev->flags))
3406                 return -EBUSY;
3407         if (strict_blocks_to_sectors(buf, &sectors) < 0)
3408                 return -EINVAL;
3409         if (rdev->data_offset != rdev->new_data_offset)
3410                 return -EINVAL; /* too confusing */
3411         if (my_mddev->pers && rdev->raid_disk >= 0) {
3412                 if (my_mddev->persistent) {
3413                         sectors = super_types[my_mddev->major_version].
3414                                 rdev_size_change(rdev, sectors);
3415                         if (!sectors)
3416                                 return -EBUSY;
3417                 } else if (!sectors)
3418                         sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
3419                                 rdev->data_offset;
3420                 if (!my_mddev->pers->resize)
3421                         /* Cannot change size for RAID0 or Linear etc */
3422                         return -EINVAL;
3423         }
3424         if (sectors < my_mddev->dev_sectors)
3425                 return -EINVAL; /* component must fit device */
3426
3427         rdev->sectors = sectors;
3428         if (sectors > oldsectors && my_mddev->external) {
3429                 /* Need to check that all other rdevs with the same
3430                  * ->bdev do not overlap.  'rcu' is sufficient to walk
3431                  * the rdev lists safely.
3432                  * This check does not provide a hard guarantee, it
3433                  * just helps avoid dangerous mistakes.
3434                  */
3435                 struct mddev *mddev;
3436                 int overlap = 0;
3437                 struct list_head *tmp;
3438
3439                 rcu_read_lock();
3440                 for_each_mddev(mddev, tmp) {
3441                         struct md_rdev *rdev2;
3442
3443                         rdev_for_each(rdev2, mddev)
3444                                 if (rdev->bdev == rdev2->bdev &&
3445                                     rdev != rdev2 &&
3446                                     overlaps(rdev->data_offset, rdev->sectors,
3447                                              rdev2->data_offset,
3448                                              rdev2->sectors)) {
3449                                         overlap = 1;
3450                                         break;
3451                                 }
3452                         if (overlap) {
3453                                 mddev_put(mddev);
3454                                 break;
3455                         }
3456                 }
3457                 rcu_read_unlock();
3458                 if (overlap) {
3459                         /* Someone else could have slipped in a size
3460                          * change here, but doing so is just silly.
3461                          * We put oldsectors back because we *know* it is
3462                          * safe, and trust userspace not to race with
3463                          * itself
3464                          */
3465                         rdev->sectors = oldsectors;
3466                         return -EBUSY;
3467                 }
3468         }
3469         return len;
3470 }
3471
3472 static struct rdev_sysfs_entry rdev_size =
3473 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3474
3475 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3476 {
3477         unsigned long long recovery_start = rdev->recovery_offset;
3478
3479         if (test_bit(In_sync, &rdev->flags) ||
3480             recovery_start == MaxSector)
3481                 return sprintf(page, "none\n");
3482
3483         return sprintf(page, "%llu\n", recovery_start);
3484 }
3485
3486 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3487 {
3488         unsigned long long recovery_start;
3489
3490         if (cmd_match(buf, "none"))
3491                 recovery_start = MaxSector;
3492         else if (kstrtoull(buf, 10, &recovery_start))
3493                 return -EINVAL;
3494
3495         if (rdev->mddev->pers &&
3496             rdev->raid_disk >= 0)
3497                 return -EBUSY;
3498
3499         rdev->recovery_offset = recovery_start;
3500         if (recovery_start == MaxSector)
3501                 set_bit(In_sync, &rdev->flags);
3502         else
3503                 clear_bit(In_sync, &rdev->flags);
3504         return len;
3505 }
3506
3507 static struct rdev_sysfs_entry rdev_recovery_start =
3508 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3509
3510 /* sysfs access to bad-blocks list.
3511  * We present two files.
3512  * 'bad-blocks' lists sector numbers and lengths of ranges that
3513  *    are recorded as bad.  The list is truncated to fit within
3514  *    the one-page limit of sysfs.
3515  *    Writing "sector length" to this file adds an acknowledged
3516  *    bad block range.
3517  * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
3518  *    been acknowledged.  Writing to this file adds bad blocks
3519  *    without acknowledging them.  This is largely for testing.
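      *    For example, writing "1000 8" to 'bad_blocks' records an
      *    acknowledged bad range of 8 sectors starting at sector 1000.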
3520  */
3521 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3522 {
3523         return badblocks_show(&rdev->badblocks, page, 0);
3524 }
3525 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3526 {
3527         int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3528         /* Maybe that ack was all we needed */
3529         if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3530                 wake_up(&rdev->blocked_wait);
3531         return rv;
3532 }
3533 static struct rdev_sysfs_entry rdev_bad_blocks =
3534 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3535
3536 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3537 {
3538         return badblocks_show(&rdev->badblocks, page, 1);
3539 }
3540 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3541 {
3542         return badblocks_store(&rdev->badblocks, page, len, 1);
3543 }
3544 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3545 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3546
3547 static ssize_t
3548 ppl_sector_show(struct md_rdev *rdev, char *page)
3549 {
3550         return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3551 }
3552
3553 static ssize_t
3554 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3555 {
3556         unsigned long long sector;
3557
3558         if (kstrtoull(buf, 10, &sector) < 0)
3559                 return -EINVAL;
3560         if (sector != (sector_t)sector)
3561                 return -EINVAL;
3562
3563         if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3564             rdev->raid_disk >= 0)
3565                 return -EBUSY;
3566
3567         if (rdev->mddev->persistent) {
3568                 if (rdev->mddev->major_version == 0)
3569                         return -EINVAL;
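                     /* The PPL offset is stored as a signed 16-bit sector
                      * offset from the superblock, so the requested sector
                      * must lie within S16 range of sb_start.
                      */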
3570                 if ((sector > rdev->sb_start &&
3571                      sector - rdev->sb_start > S16_MAX) ||
3572                     (sector < rdev->sb_start &&
3573                      rdev->sb_start - sector > -S16_MIN))
3574                         return -EINVAL;
3575                 rdev->ppl.offset = sector - rdev->sb_start;
3576         } else if (!rdev->mddev->external) {
3577                 return -EBUSY;
3578         }
3579         rdev->ppl.sector = sector;
3580         return len;
3581 }
3582
3583 static struct rdev_sysfs_entry rdev_ppl_sector =
3584 __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3585
3586 static ssize_t
3587 ppl_size_show(struct md_rdev *rdev, char *page)
3588 {
3589         return sprintf(page, "%u\n", rdev->ppl.size);
3590 }
3591
3592 static ssize_t
3593 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3594 {
3595         unsigned int size;
3596
3597         if (kstrtouint(buf, 10, &size) < 0)
3598                 return -EINVAL;
3599
3600         if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3601             rdev->raid_disk >= 0)
3602                 return -EBUSY;
3603
3604         if (rdev->mddev->persistent) {
3605                 if (rdev->mddev->major_version == 0)
3606                         return -EINVAL;
3607                 if (size > U16_MAX)
3608                         return -EINVAL;
3609         } else if (!rdev->mddev->external) {
3610                 return -EBUSY;
3611         }
3612         rdev->ppl.size = size;
3613         return len;
3614 }
3615
3616 static struct rdev_sysfs_entry rdev_ppl_size =
3617 __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3618
3619 static struct attribute *rdev_default_attrs[] = {
3620         &rdev_state.attr,
3621         &rdev_errors.attr,
3622         &rdev_slot.attr,
3623         &rdev_offset.attr,
3624         &rdev_new_offset.attr,
3625         &rdev_size.attr,
3626         &rdev_recovery_start.attr,
3627         &rdev_bad_blocks.attr,
3628         &rdev_unack_bad_blocks.attr,
3629         &rdev_ppl_sector.attr,
3630         &rdev_ppl_size.attr,
3631         NULL,
3632 };
3633 static ssize_t
3634 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3635 {
3636         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3637         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3638
3639         if (!entry->show)
3640                 return -EIO;
3641         if (!rdev->mddev)
3642                 return -ENODEV;
3643         return entry->show(rdev, page);
3644 }
3645
3646 static ssize_t
3647 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3648               const char *page, size_t length)
3649 {
3650         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3651         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3652         ssize_t rv;
3653         struct mddev *mddev = rdev->mddev;
3654
3655         if (!entry->store)
3656                 return -EIO;
3657         if (!capable(CAP_SYS_ADMIN))
3658                 return -EACCES;
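             /* Take the array lock only if the rdev is still attached to an
              * array; it may be detached while we wait for the lock, so
              * rdev->mddev is re-checked below.
              */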
3659         rv = mddev ? mddev_lock(mddev) : -ENODEV;
3660         if (!rv) {
3661                 if (rdev->mddev == NULL)
3662                         rv = -ENODEV;
3663                 else
3664                         rv = entry->store(rdev, page, length);
3665                 mddev_unlock(mddev);
3666         }
3667         return rv;
3668 }
3669
3670 static void rdev_free(struct kobject *ko)
3671 {
3672         struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3673         kfree(rdev);
3674 }
3675 static const struct sysfs_ops rdev_sysfs_ops = {
3676         .show           = rdev_attr_show,
3677         .store          = rdev_attr_store,
3678 };
3679 static struct kobj_type rdev_ktype = {
3680         .release        = rdev_free,
3681         .sysfs_ops      = &rdev_sysfs_ops,
3682         .default_attrs  = rdev_default_attrs,
3683 };
3684
3685 int md_rdev_init(struct md_rdev *rdev)
3686 {
3687         rdev->desc_nr = -1;
3688         rdev->saved_raid_disk = -1;
3689         rdev->raid_disk = -1;
3690         rdev->flags = 0;
3691         rdev->data_offset = 0;
3692         rdev->new_data_offset = 0;
3693         rdev->sb_events = 0;
3694         rdev->last_read_error = 0;
3695         rdev->sb_loaded = 0;
3696         rdev->bb_page = NULL;
3697         atomic_set(&rdev->nr_pending, 0);
3698         atomic_set(&rdev->read_errors, 0);
3699         atomic_set(&rdev->corrected_errors, 0);
3700
3701         INIT_LIST_HEAD(&rdev->same_set);
3702         init_waitqueue_head(&rdev->blocked_wait);
3703
3704         /* Add space to store bad block list.
3705          * This reserves the space even on arrays where it cannot
3706          * be used - I wonder if that matters
3707          */
3708         return badblocks_init(&rdev->badblocks, 0);
3709 }
3710 EXPORT_SYMBOL_GPL(md_rdev_init);
3711 /*
3712  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3713  *
3714  * mark the device faulty if:
3715  *
3716  *   - the device is nonexistent (zero size)
3717  *   - the device has no valid superblock
3718  *
3719  * a faulty rdev _never_ has rdev->sb set.
3720  */
3721 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3722 {
3723         char b[BDEVNAME_SIZE];
3724         int err;
3725         struct md_rdev *rdev;
3726         sector_t size;
3727
3728         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3729         if (!rdev)
3730                 return ERR_PTR(-ENOMEM);
3731
3732         err = md_rdev_init(rdev);
3733         if (err)
3734                 goto abort_free;
3735         err = alloc_disk_sb(rdev);
3736         if (err)
3737                 goto abort_free;
3738
3739         err = lock_rdev(rdev, newdev, super_format == -2);
3740         if (err)
3741                 goto abort_free;
3742
3743         kobject_init(&rdev->kobj, &rdev_ktype);
3744
3745         size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3746         if (!size) {
3747                 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
3748                         bdevname(rdev->bdev,b));
3749                 err = -EINVAL;
3750                 goto abort_free;
3751         }
3752
3753         if (super_format >= 0) {
3754                 err = super_types[super_format].
3755                         load_super(rdev, NULL, super_minor);
3756                 if (err == -EINVAL) {
3757                         pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
3758                                 bdevname(rdev->bdev,b),
3759                                 super_format, super_minor);
3760                         goto abort_free;
3761                 }
3762                 if (err < 0) {
3763                         pr_warn("md: could not read %s's sb, not importing!\n",
3764                                 bdevname(rdev->bdev,b));
3765                         goto abort_free;
3766                 }
3767         }
3768
3769         return rdev;
3770
3771 abort_free:
3772         if (rdev->bdev)
3773                 unlock_rdev(rdev);
3774         md_rdev_clear(rdev);
3775         kfree(rdev);
3776         return ERR_PTR(err);
3777 }
3778
3779 /*
3780  * Check a full RAID array for plausibility
3781  */
3782
3783 static int analyze_sbs(struct mddev *mddev)
3784 {
3785         int i;
3786         struct md_rdev *rdev, *freshest, *tmp;
3787         char b[BDEVNAME_SIZE];
3788
3789         freshest = NULL;
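             /* load_super() returns 1 when this superblock is the freshest
              * seen so far, 0 when it is not, and a negative error when it
              * is invalid.
              */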
3790         rdev_for_each_safe(rdev, tmp, mddev)
3791                 switch (super_types[mddev->major_version].
3792                         load_super(rdev, freshest, mddev->minor_version)) {
3793                 case 1:
3794                         freshest = rdev;
3795                         break;
3796                 case 0:
3797                         break;
3798                 default:
3799                         pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
3800                                 bdevname(rdev->bdev,b));
3801                         md_kick_rdev_from_array(rdev);
3802                 }
3803
3804         /* Cannot find a valid fresh disk */
3805         if (!freshest) {
3806                 pr_warn("md: cannot find a valid disk\n");
3807                 return -EINVAL;
3808         }
3809
3810         super_types[mddev->major_version].
3811                 validate_super(mddev, freshest);
3812
3813         i = 0;
3814         rdev_for_each_safe(rdev, tmp, mddev) {
3815                 if (mddev->max_disks &&
3816                     (rdev->desc_nr >= mddev->max_disks ||
3817                      i > mddev->max_disks)) {
3818                         pr_warn("md: %s: %s: only %d devices permitted\n",
3819                                 mdname(mddev), bdevname(rdev->bdev, b),
3820                                 mddev->max_disks);
3821                         md_kick_rdev_from_array(rdev);
3822                         continue;
3823                 }
3824                 if (rdev != freshest) {
3825                         if (super_types[mddev->major_version].
3826                             validate_super(mddev, rdev)) {
3827                                 pr_warn("md: kicking non-fresh %s from array!\n",
3828                                         bdevname(rdev->bdev,b));
3829                                 md_kick_rdev_from_array(rdev);
3830                                 continue;
3831                         }
3832                 }
3833                 if (mddev->level == LEVEL_MULTIPATH) {
3834                         rdev->desc_nr = i++;
3835                         rdev->raid_disk = rdev->desc_nr;
3836                         set_bit(In_sync, &rdev->flags);
3837                 } else if (rdev->raid_disk >=
3838                             (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3839                            !test_bit(Journal, &rdev->flags)) {
3840                         rdev->raid_disk = -1;
3841                         clear_bit(In_sync, &rdev->flags);
3842                 }
3843         }
3844
3845         return 0;
3846 }
3847
3848 /* Read a fixed-point number.
3849  * Numbers in sysfs attributes should be in "standard" units where
3850  * possible, so time should be in seconds.
3851  * However we internally use a much smaller unit such as
3852  * milliseconds or jiffies.
3853  * This function takes a decimal number with a possible fractional
3854  * component, and produces an integer which is the result of
3855  * multiplying that number by 10^'scale',
3856  * all without any floating-point arithmetic.
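      * For example, with scale=3 the string "1.25" yields 1250
      * (i.e. 1.25 seconds expressed as milliseconds).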
3857  */
3858 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3859 {
3860         unsigned long result = 0;
3861         long decimals = -1;
3862         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3863                 if (*cp == '.')
3864                         decimals = 0;
3865                 else if (decimals < scale) {
3866                         unsigned int value;
3867                         value = *cp - '0';
3868                         result = result * 10 + value;
3869                         if (decimals >= 0)
3870                                 decimals++;
3871                 }
3872                 cp++;
3873         }
3874         if (*cp == '\n')
3875                 cp++;
3876         if (*cp)
3877                 return -EINVAL;
3878         if (decimals < 0)
3879                 decimals = 0;
3880         *res = result * int_pow(10, scale - decimals);
3881         return 0;
3882 }
3883
3884 static ssize_t
3885 safe_delay_show(struct mddev *mddev, char *page)
3886 {
3887         int msec = (mddev->safemode_delay*1000)/HZ;
3888         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3889 }
3890 static ssize_t
3891 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3892 {
3893         unsigned long msec;
3894
3895         if (mddev_is_clustered(mddev)) {
3896                 pr_warn("md: Safemode is disabled for clustered mode\n");
3897                 return -EINVAL;
3898         }
3899
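             /* The input is seconds with up to millisecond resolution;
              * scaling by 10^3 leaves 'msec' in milliseconds.
              */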
3900         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3901                 return -EINVAL;
3902         if (msec == 0)
3903                 mddev->safemode_delay = 0;
3904         else {
3905                 unsigned long old_delay = mddev->safemode_delay;
3906                 unsigned long new_delay = (msec*HZ)/1000;
3907
3908                 if (new_delay == 0)
3909                         new_delay = 1;
3910                 mddev->safemode_delay = new_delay;
3911                 if (new_delay < old_delay || old_delay == 0)
3912                         mod_timer(&mddev->safemode_timer, jiffies+1);
3913         }
3914         return len;
3915 }
3916 static struct md_sysfs_entry md_safe_delay =
3917 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
3918
3919 static ssize_t
3920 level_show(struct mddev *mddev, char *page)
3921 {
3922         struct md_personality *p;
3923         int ret;
3924         spin_lock(&mddev->lock);
3925         p = mddev->pers;
3926         if (p)
3927                 ret = sprintf(page, "%s\n", p->name);
3928         else if (mddev->clevel[0])
3929                 ret = sprintf(page, "%s\n", mddev->clevel);
3930         else if (mddev->level != LEVEL_NONE)
3931                 ret = sprintf(page, "%d\n", mddev->level);
3932         else
3933                 ret = 0;
3934         spin_unlock(&mddev->lock);
3935         return ret;
3936 }
3937
3938 static ssize_t
3939 level_store(struct mddev *mddev, const char *buf, size_t len)
3940 {
3941         char clevel[16];
3942         ssize_t rv;
3943         size_t slen = len;
3944         struct md_personality *pers, *oldpers;
3945         long level;
3946         void *priv, *oldpriv;
3947         struct md_rdev *rdev;
3948
3949         if (slen == 0 || slen >= sizeof(clevel))
3950                 return -EINVAL;
3951
3952         rv = mddev_lock(mddev);
3953         if (rv)
3954                 return rv;
3955
3956         if (mddev->pers == NULL) {
3957                 strncpy(mddev->clevel, buf, slen);
3958                 if (mddev->clevel[slen-1] == '\n')
3959                         slen--;
3960                 mddev->clevel[slen] = 0;
3961                 mddev->level = LEVEL_NONE;
3962                 rv = len;
3963                 goto out_unlock;
3964         }
3965         rv = -EROFS;
3966         if (mddev->ro)
3967                 goto out_unlock;
3968
3969         /* request to change the personality.  Need to ensure:
3970          *  - array is not engaged in resync/recovery/reshape
3971          *  - old personality can be suspended
3972          *  - new personality can take over the array.
3973          */
3974
3975         rv = -EBUSY;
3976         if (mddev->sync_thread ||
3977             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3978             mddev->reshape_position != MaxSector ||
3979             mddev->sysfs_active)
3980                 goto out_unlock;
3981
3982         rv = -EINVAL;
3983         if (!mddev->pers->quiesce) {
3984                 pr_warn("md: %s: %s does not support online personality change\n",
3985                         mdname(mddev), mddev->pers->name);
3986                 goto out_unlock;
3987         }
3988
3989         /* Now find the new personality */
3990         strncpy(clevel, buf, slen);
3991         if (clevel[slen-1] == '\n')
3992                 slen--;
3993         clevel[slen] = 0;
3994         if (kstrtol(clevel, 10, &level))
3995                 level = LEVEL_NONE;
3996
3997         if (request_module("md-%s", clevel) != 0)
3998                 request_module("md-level-%s", clevel);
3999         spin_lock(&pers_lock);
4000         pers = find_pers(level, clevel);
4001         if (!pers || !try_module_get(pers->owner)) {
4002                 spin_unlock(&pers_lock);
4003                 pr_warn("md: personality %s not loaded\n", clevel);
4004                 rv = -EINVAL;
4005                 goto out_unlock;
4006         }
4007         spin_unlock(&pers_lock);
4008
4009         if (pers == mddev->pers) {
4010                 /* Nothing to do! */
4011                 module_put(pers->owner);
4012                 rv = len;
4013                 goto out_unlock;
4014         }
4015         if (!pers->takeover) {
4016                 module_put(pers->owner);
4017                 pr_warn("md: %s: %s does not support personality takeover\n",
4018                         mdname(mddev), clevel);
4019                 rv = -EINVAL;
4020                 goto out_unlock;
4021         }
4022
4023         rdev_for_each(rdev, mddev)
4024                 rdev->new_raid_disk = rdev->raid_disk;
4025
4026         /* ->takeover must set new_* and/or delta_disks
4027          * if it succeeds, and may set them when it fails.
4028          */
4029         priv = pers->takeover(mddev);
4030         if (IS_ERR(priv)) {
4031                 mddev->new_level = mddev->level;
4032                 mddev->new_layout = mddev->layout;
4033                 mddev->new_chunk_sectors = mddev->chunk_sectors;
4034                 mddev->raid_disks -= mddev->delta_disks;
4035                 mddev->delta_disks = 0;
4036                 mddev->reshape_backwards = 0;
4037                 module_put(pers->owner);
4038                 pr_warn("md: %s: %s would not accept array\n",
4039                         mdname(mddev), clevel);
4040                 rv = PTR_ERR(priv);
4041                 goto out_unlock;
4042         }
4043
4044         /* Looks like we have a winner */
4045         mddev_suspend(mddev);
4046         mddev_detach(mddev);
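             /* The array is suspended and the old personality detached;
              * install the new personality and geometry under mddev->lock.
              */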
4047
4048         spin_lock(&mddev->lock);
4049         oldpers = mddev->pers;
4050         oldpriv = mddev->private;
4051         mddev->pers = pers;
4052         mddev->private = priv;
4053         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4054         mddev->level = mddev->new_level;
4055         mddev->layout = mddev->new_layout;
4056         mddev->chunk_sectors = mddev->new_chunk_sectors;
4057         mddev->delta_disks = 0;
4058         mddev->reshape_backwards = 0;
4059         mddev->degraded = 0;
4060         spin_unlock(&mddev->lock);
4061
4062         if (oldpers->sync_request == NULL &&
4063             mddev->external) {
4064                 /* We are converting from a no-redundancy array
4065                  * to a redundancy array and metadata is managed
4066                  * externally so we need to be sure that writes
4067                  * won't block due to a need to transition
4068                  *      clean->dirty
4069                  * until external management is started.
4070                  */
4071                 mddev->in_sync = 0;
4072                 mddev->safemode_delay = 0;
4073                 mddev->safemode = 0;
4074         }
4075
4076         oldpers->free(mddev, oldpriv);
4077
4078         if (oldpers->sync_request == NULL &&
4079             pers->sync_request != NULL) {
4080                 /* need to add the md_redundancy_group */
4081                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4082                         pr_warn("md: cannot register extra attributes for %s\n",
4083                                 mdname(mddev));
4084                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
4085                 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
4086                 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
4087         }
4088         if (oldpers->sync_request != NULL &&
4089             pers->sync_request == NULL) {
4090                 /* need to remove the md_redundancy_group */
4091                 if (mddev->to_remove == NULL)
4092                         mddev->to_remove = &md_redundancy_group;
4093         }
4094
4095         module_put(oldpers->owner);
4096
4097         rdev_for_each(rdev, mddev) {
4098                 if (rdev->raid_disk < 0)
4099                         continue;
4100                 if (rdev->new_raid_disk >= mddev->raid_disks)
4101                         rdev->new_raid_disk = -1;
4102                 if (rdev->new_raid_disk == rdev->raid_disk)
4103                         continue;
4104                 sysfs_unlink_rdev(mddev, rdev);
4105         }
4106         rdev_for_each(rdev, mddev) {
4107                 if (rdev->raid_disk < 0)
4108                         continue;
4109                 if (rdev->new_raid_disk == rdev->raid_disk)
4110                         continue;
4111                 rdev->raid_disk = rdev->new_raid_disk;
4112                 if (rdev->raid_disk < 0)
4113                         clear_bit(In_sync, &rdev->flags);
4114                 else {
4115                         if (sysfs_link_rdev(mddev, rdev))
4116                                 pr_warn("md: cannot register rd%d for %s after level change\n",
4117                                         rdev->raid_disk, mdname(mddev));
4118                 }
4119         }
4120
4121         if (pers->sync_request == NULL) {
4122                 /* this is now an array without redundancy, so
4123                  * it must always be in_sync
4124                  */
4125                 mddev->in_sync = 1;
4126                 del_timer_sync(&mddev->safemode_timer);
4127         }
4128         blk_set_stacking_limits(&mddev->queue->limits);
4129         pers->run(mddev);
4130         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4131         mddev_resume(mddev);
4132         if (!mddev->thread)
4133                 md_update_sb(mddev, 1);
4134         sysfs_notify_dirent_safe(mddev->sysfs_level);
4135         md_new_event(mddev);
4136         rv = len;
4137 out_unlock:
4138         mddev_unlock(mddev);
4139         return rv;
4140 }
4141
4142 static struct md_sysfs_entry md_level =
4143 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
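
/*
 * Usage sketch, assuming an array at /dev/md0: user space requests a
 * level change by writing the new personality name to this attribute:
 *
 *     echo raid6 > /sys/block/md0/md/level
 *
 * The write only succeeds if the new personality accepts the array's
 * current geometry; otherwise level_store() above reports
 * "would not accept array" and restores the old settings.
 */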
4144
4145 static ssize_t
4146 layout_show(struct mddev *mddev, char *page)
4147 {
4148         /* just a number, not meaningful for all levels */
4149         if (mddev->reshape_position != MaxSector &&
4150             mddev->layout != mddev->new_layout)
4151                 return sprintf(page, "%d (%d)\n",
4152                                mddev->new_layout, mddev->layout);
4153         return sprintf(page, "%d\n", mddev->layout);
4154 }
4155
4156 static ssize_t
4157 layout_store(struct mddev *mddev, const char *buf, size_t len)
4158 {
4159         unsigned int n;
4160         int err;
4161
4162         err = kstrtouint(buf, 10, &n);
4163         if (err < 0)
4164                 return err;
4165         err = mddev_lock(mddev);
4166         if (err)
4167                 return err;
4168
4169         if (mddev->pers) {
4170                 if (mddev->pers->check_reshape == NULL)
4171                         err = -EBUSY;
4172                 else if (mddev->ro)
4173                         err = -EROFS;
4174                 else {
4175                         mddev->new_layout = n;
4176                         err = mddev->pers->check_reshape(mddev);
4177                         if (err)
4178                                 mddev->new_layout = mddev->layout;
4179                 }
4180         } else {
4181                 mddev->new_layout = n;
4182                 if (mddev->reshape_position == MaxSector)
4183                         mddev->layout = n;
4184         }
4185         mddev_unlock(mddev);
4186         return err ?: len;
4187 }
4188 static struct md_sysfs_entry md_layout =
4189 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
4190
4191 static ssize_t
4192 raid_disks_show(struct mddev *mddev, char *page)
4193 {
4194         if (mddev->raid_disks == 0)
4195                 return 0;
4196         if (mddev->reshape_position != MaxSector &&
4197             mddev->delta_disks != 0)
4198                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4199                                mddev->raid_disks - mddev->delta_disks);
4200         return sprintf(page, "%d\n", mddev->raid_disks);
4201 }
4202
4203 static int update_raid_disks(struct mddev *mddev, int raid_disks);
4204
4205 static ssize_t
4206 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
4207 {
4208         unsigned int n;
4209         int err;
4210
4211         err = kstrtouint(buf, 10, &n);
4212         if (err < 0)
4213                 return err;
4214
4215         err = mddev_lock(mddev);
4216         if (err)
4217                 return err;
4218         if (mddev->pers)
4219                 err = update_raid_disks(mddev, n);
4220         else if (mddev->reshape_position != MaxSector) {
4221                 struct md_rdev *rdev;
4222                 int olddisks = mddev->raid_disks - mddev->delta_disks;
4223
4224                 err = -EINVAL;
4225                 rdev_for_each(rdev, mddev) {
4226                         if (olddisks < n &&
4227                             rdev->data_offset < rdev->new_data_offset)
4228                                 goto out_unlock;
4229                         if (olddisks > n &&
4230                             rdev->data_offset > rdev->new_data_offset)
4231                                 goto out_unlock;
4232                 }
4233                 err = 0;
4234                 mddev->delta_disks = n - olddisks;
4235                 mddev->raid_disks = n;
4236                 mddev->reshape_backwards = (mddev->delta_disks < 0);
4237         } else
4238                 mddev->raid_disks = n;
4239 out_unlock:
4240         mddev_unlock(mddev);
4241         return err ? err : len;
4242 }
4243 static struct md_sysfs_entry md_raid_disks =
4244 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
4245
4246 static ssize_t
4247 uuid_show(struct mddev *mddev, char *page)
4248 {
4249         return sprintf(page, "%pU\n", mddev->uuid);
4250 }
4251 static struct md_sysfs_entry md_uuid =
4252 __ATTR(uuid, S_IRUGO, uuid_show, NULL);
4253
4254 static ssize_t
4255 chunk_size_show(struct mddev *mddev, char *page)
4256 {
4257         if (mddev->reshape_position != MaxSector &&
4258             mddev->chunk_sectors != mddev->new_chunk_sectors)
4259                 return sprintf(page, "%d (%d)\n",
4260                                mddev->new_chunk_sectors << 9,
4261                                mddev->chunk_sectors << 9);
4262         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
4263 }
4264
4265 static ssize_t
4266 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
4267 {
4268         unsigned long n;
4269         int err;
4270
4271         err = kstrtoul(buf, 10, &n);
4272         if (err < 0)
4273                 return err;
4274
4275         err = mddev_lock(mddev);
4276         if (err)
4277                 return err;
4278         if (mddev->pers) {
4279                 if (mddev->pers->check_reshape == NULL)
4280                         err = -EBUSY;
4281                 else if (mddev->ro)
4282                         err = -EROFS;
4283                 else {
4284                         mddev->new_chunk_sectors = n >> 9;
4285                         err = mddev->pers->check_reshape(mddev);
4286                         if (err)
4287                                 mddev->new_chunk_sectors = mddev->chunk_sectors;
4288                 }
4289         } else {
4290                 mddev->new_chunk_sectors = n >> 9;
4291                 if (mddev->reshape_position == MaxSector)
4292                         mddev->chunk_sectors = n >> 9;
4293         }
4294         mddev_unlock(mddev);
4295         return err ?: len;
4296 }
4297 static struct md_sysfs_entry md_chunk_size =
4298 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
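
/*
 * Usage sketch, assuming an inactive array at /dev/md0: the value written
 * is a chunk size in bytes (converted to sectors above), e.g. 128 KiB:
 *
 *     echo 131072 > /sys/block/md0/md/chunk_size
 *
 * On a running array the write is only accepted if the personality
 * provides check_reshape() and the array is not read-only.
 */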
4299
4300 static ssize_t
4301 resync_start_show(struct mddev *mddev, char *page)
4302 {
4303         if (mddev->recovery_cp == MaxSector)
4304                 return sprintf(page, "none\n");
4305         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
4306 }
4307
4308 static ssize_t
4309 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
4310 {
4311         unsigned long long n;
4312         int err;
4313
4314         if (cmd_match(buf, "none"))
4315                 n = MaxSector;
4316         else {
4317                 err = kstrtoull(buf, 10, &n);
4318                 if (err < 0)
4319                         return err;
4320                 if (n != (sector_t)n)
4321                         return -EINVAL;
4322         }
4323
4324         err = mddev_lock(mddev);
4325         if (err)
4326                 return err;
4327         if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
4328                 err = -EBUSY;
4329
4330         if (!err) {
4331                 mddev->recovery_cp = n;
4332                 if (mddev->pers)
4333                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
4334         }
4335         mddev_unlock(mddev);
4336         return err ?: len;
4337 }
4338 static struct md_sysfs_entry md_resync_start =
4339 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4340                 resync_start_show, resync_start_store);
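
/*
 * Usage sketch, assuming an array at /dev/md0 that is not running (or has
 * recovery frozen): declare the whole array as already in sync so that no
 * initial resync is performed when it starts:
 *
 *     echo none > /sys/block/md0/md/resync_start
 */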
4341
4342 /*
4343  * The array state can be:
4344  *
4345  * clear
4346  *     No devices, no size, no level
4347  *     Equivalent to STOP_ARRAY ioctl
4348  * inactive
4349  *     May have some settings, but array is not active
4350  *        all IO results in error
4351  *     When written, doesn't tear down array, but just stops it
4352  * suspended (not supported yet)
4353  *     All IO requests will block. The array can be reconfigured.
4354  *     Writing this, if accepted, will block until array is quiescent
4355  * readonly
4356  *     no resync can happen.  no superblocks get written.
4357  *     write requests fail
4358  * read-auto
4359  *     like readonly, but behaves like 'clean' on a write request.
4360  *
4361  * clean - no pending writes, but otherwise active.
4362  *     When written to inactive array, starts without resync
4363  *     If a write request arrives then
4364  *       if metadata is known, mark 'dirty' and switch to 'active'.
4365  *       if not known, block and switch to write-pending
4366  *     If written to an active array that has pending writes, then fails.
4367  * active
4368  *     fully active: IO and resync can be happening.
4369  *     When written to inactive array, starts with resync
4370  *
4371  * write-pending
4372  *     clean, but writes are blocked waiting for 'active' to be written.
4373  *
4374  * active-idle
4375  *     like active, but no writes have been seen for a while (100msec).
4376  *
4377  * broken
4378  *     RAID0/LINEAR-only: same as clean, but array is missing a member.
4379  *     It's useful because RAID0/LINEAR mounted-arrays aren't stopped
4380  *     when a member is gone, so this state will at least alert the
4381  *     user that something is wrong.
4382  */
4383 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
4384                    write_pending, active_idle, broken, bad_word};
4385 static char *array_states[] = {
4386         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
4387         "write-pending", "active-idle", "broken", NULL };
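
/*
 * Usage sketch, assuming an array at /dev/md0: these strings are what user
 * space reads from and writes to the array_state attribute, e.g.:
 *
 *     cat /sys/block/md0/md/array_state          # e.g. "clean"
 *     echo readonly > /sys/block/md0/md/array_state
 *     echo active   > /sys/block/md0/md/array_state
 *
 * "write-pending", "active-idle" and "broken" are only ever reported;
 * array_state_store() below rejects attempts to set them.
 */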
4388
4389 static int match_word(const char *word, char **list)
4390 {
4391         int n;
4392         for (n=0; list[n]; n++)
4393                 if (cmd_match(word, list[n]))
4394                         break;
4395         return n;
4396 }
4397
4398 static ssize_t
4399 array_state_show(struct mddev *mddev, char *page)
4400 {
4401         enum array_state st = inactive;
4402
4403         if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
4404                 switch(mddev->ro) {
4405                 case 1:
4406                         st = readonly;
4407                         break;
4408                 case 2:
4409                         st = read_auto;
4410                         break;
4411                 case 0:
4412                         spin_lock(&mddev->lock);
4413                         if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
4414                                 st = write_pending;
4415                         else if (mddev->in_sync)
4416                                 st = clean;
4417                         else if (mddev->safemode)
4418                                 st = active_idle;
4419                         else
4420                                 st = active;
4421                         spin_unlock(&mddev->lock);
4422                 }
4423
4424                 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4425                         st = broken;
4426         } else {
4427                 if (list_empty(&mddev->disks) &&
4428                     mddev->raid_disks == 0 &&
4429                     mddev->dev_sectors == 0)
4430                         st = clear;
4431                 else
4432                         st = inactive;
4433         }
4434         return sprintf(page, "%s\n", array_states[st]);
4435 }
4436
4437 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4438 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
4439 static int restart_array(struct mddev *mddev);
4440
4441 static ssize_t
4442 array_state_store(struct mddev *mddev, const char *buf, size_t len)
4443 {
4444         int err = 0;
4445         enum array_state st = match_word(buf, array_states);
4446
4447         if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
4448                 /* don't take reconfig_mutex when toggling between
4449                  * clean and active
4450                  */
4451                 spin_lock(&mddev->lock);
4452                 if (st == active) {
4453                         restart_array(mddev);
4454                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4455                         md_wakeup_thread(mddev->thread);
4456                         wake_up(&mddev->sb_wait);
4457                 } else /* st == clean */ {
4458                         restart_array(mddev);
4459                         if (!set_in_sync(mddev))
4460                                 err = -EBUSY;
4461                 }
4462                 if (!err)
4463                         sysfs_notify_dirent_safe(mddev->sysfs_state);
4464                 spin_unlock(&mddev->lock);
4465                 return err ?: len;
4466         }
4467         err = mddev_lock(mddev);
4468         if (err)
4469                 return err;
4470         err = -EINVAL;
4471         switch(st) {
4472         case bad_word:
4473                 break;
4474         case clear:
4475                 /* stopping an active array */
4476                 err = do_md_stop(mddev, 0, NULL);
4477                 break;
4478         case inactive:
4479                 /* stop an active array without tearing it down */
4480                 if (mddev->pers)
4481                         err = do_md_stop(mddev, 2, NULL);
4482                 else
4483                         err = 0; /* already inactive */
4484                 break;
4485         case suspended:
4486                 break; /* not supported yet */
4487         case readonly:
4488                 if (mddev->pers)
4489                         err = md_set_readonly(mddev, NULL);
4490                 else {
4491                         mddev->ro = 1;
4492                         set_disk_ro(mddev->gendisk, 1);
4493                         err = do_md_run(mddev);
4494                 }
4495                 break;
4496         case read_auto:
4497                 if (mddev->pers) {
4498                         if (mddev->ro == 0)
4499                                 err = md_set_readonly(mddev, NULL);
4500                         else if (mddev->ro == 1)
4501                                 err = restart_array(mddev);
4502                         if (err == 0) {
4503                                 mddev->ro = 2;
4504                                 set_disk_ro(mddev->gendisk, 0);
4505                         }
4506                 } else {
4507                         mddev->ro = 2;
4508                         err = do_md_run(mddev);
4509                 }
4510                 break;
4511         case clean:
4512                 if (mddev->pers) {
4513                         err = restart_array(mddev);
4514                         if (err)
4515                                 break;
4516                         spin_lock(&mddev->lock);
4517                         if (!set_in_sync(mddev))
4518                                 err = -EBUSY;
4519                         spin_unlock(&mddev->lock);
4520                 } else
4521                         err = -EINVAL;
4522                 break;
4523         case active:
4524                 if (mddev->pers) {
4525                         err = restart_array(mddev);
4526                         if (err)
4527                                 break;
4528                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4529                         wake_up(&mddev->sb_wait);
4530                         err = 0;
4531                 } else {
4532                         mddev->ro = 0;
4533                         set_disk_ro(mddev->gendisk, 0);
4534                         err = do_md_run(mddev);
4535                 }
4536                 break;
4537         case write_pending:
4538         case active_idle:
4539         case broken:
4540                 /* these cannot be set */
4541                 break;
4542         }
4543
4544         if (!err) {
4545                 if (mddev->hold_active == UNTIL_IOCTL)
4546                         mddev->hold_active = 0;
4547                 sysfs_notify_dirent_safe(mddev->sysfs_state);
4548         }
4549         mddev_unlock(mddev);
4550         return err ?: len;
4551 }
4552 static struct md_sysfs_entry md_array_state =
4553 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
4554
4555 static ssize_t
4556 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
4557         return sprintf(page, "%d\n",
4558                        atomic_read(&mddev->max_corr_read_errors));
4559 }
4560
4561 static ssize_t
4562 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4563 {
4564         unsigned int n;
4565         int rv;
4566
4567         rv = kstrtouint(buf, 10, &n);
4568         if (rv < 0)
4569                 return rv;
4570         atomic_set(&mddev->max_corr_read_errors, n);
4571         return len;
4572 }
4573
4574 static struct md_sysfs_entry max_corr_read_errors =
4575 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4576         max_corrected_read_errors_store);
4577
4578 static ssize_t
4579 null_show(struct mddev *mddev, char *page)
4580 {
4581         return -EINVAL;
4582 }
4583
4584 /* need to ensure rdev_delayed_delete() has completed */
4585 static void flush_rdev_wq(struct mddev *mddev)
4586 {
4587         struct md_rdev *rdev;
4588
4589         rcu_read_lock();
4590         rdev_for_each_rcu(rdev, mddev)
4591                 if (work_pending(&rdev->del_work)) {
4592                         flush_workqueue(md_rdev_misc_wq);
4593                         break;
4594                 }
4595         rcu_read_unlock();
4596 }
4597
4598 static ssize_t
4599 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4600 {
4601         /* buf must be "%d:%d" (major:minor), optionally newline-terminated */
4602         /* The new device is added to the array.
4603          * If the array has a persistent superblock, we read the
4604          * superblock to initialise info and check validity.
4605          * Otherwise, the only checking done is that in bind_rdev_to_array,
4606          * which mainly checks size.
4607          */
4608         char *e;
4609         int major = simple_strtoul(buf, &e, 10);
4610         int minor;
4611         dev_t dev;
4612         struct md_rdev *rdev;
4613         int err;
4614
4615         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4616                 return -EINVAL;
4617         minor = simple_strtoul(e+1, &e, 10);
4618         if (*e && *e != '\n')
4619                 return -EINVAL;
4620         dev = MKDEV(major, minor);
4621         if (major != MAJOR(dev) ||
4622             minor != MINOR(dev))
4623                 return -EOVERFLOW;
4624
4625         flush_rdev_wq(mddev);
4626         err = mddev_lock(mddev);
4627         if (err)
4628                 return err;
4629         if (mddev->persistent) {
4630                 rdev = md_import_device(dev, mddev->major_version,
4631                                         mddev->minor_version);
4632                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4633                         struct md_rdev *rdev0
4634                                 = list_entry(mddev->disks.next,
4635                                              struct md_rdev, same_set);
4636                         err = super_types[mddev->major_version]
4637                                 .load_super(rdev, rdev0, mddev->minor_version);
4638                         if (err < 0)
4639                                 goto out;
4640                 }
4641         } else if (mddev->external)
4642                 rdev = md_import_device(dev, -2, -1);
4643         else
4644                 rdev = md_import_device(dev, -1, -1);
4645
4646         if (IS_ERR(rdev)) {
4647                 mddev_unlock(mddev);
4648                 return PTR_ERR(rdev);
4649         }
4650         err = bind_rdev_to_array(rdev, mddev);
4651  out:
4652         if (err)
4653                 export_rdev(rdev);
4654         mddev_unlock(mddev);
4655         if (!err)
4656                 md_new_event(mddev);
4657         return err ? err : len;
4658 }
4659
4660 static struct md_sysfs_entry md_new_device =
4661 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
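
/*
 * Usage sketch, assuming /dev/md0 exists and /dev/sdb1 is block device
 * 8:17: add the device to the array by writing "major:minor":
 *
 *     echo 8:17 > /sys/block/md0/md/new_dev
 *
 * For arrays with a persistent superblock the device's superblock is
 * loaded and validated before it is bound to the array.
 */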
4662
4663 static ssize_t
4664 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4665 {
4666         char *end;
4667         unsigned long chunk, end_chunk;
4668         int err;
4669
4670         err = mddev_lock(mddev);
4671         if (err)
4672                 return err;
4673         if (!mddev->bitmap)
4674                 goto out;
4675         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4676         while (*buf) {
4677                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4678                 if (buf == end) break;
4679                 if (*end == '-') { /* range */
4680                         buf = end + 1;
4681                         end_chunk = simple_strtoul(buf, &end, 0);
4682                         if (buf == end) break;
4683                 }
4684                 if (*end && !isspace(*end)) break;
4685                 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4686                 buf = skip_spaces(end);
4687         }
4688         md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4689 out:
4690         mddev_unlock(mddev);
4691         return len;
4692 }
4693
4694 static struct md_sysfs_entry md_bitmap =
4695 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
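
/*
 * Usage sketch, assuming /dev/md0 has a write-intent bitmap: mark bitmap
 * chunks 0-15 and chunk 100 dirty so the corresponding regions are
 * resynced:
 *
 *     echo "0-15 100" > /sys/block/md0/md/bitmap_set_bits
 */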
4696
4697 static ssize_t
4698 size_show(struct mddev *mddev, char *page)
4699 {
4700         return sprintf(page, "%llu\n",
4701                 (unsigned long long)mddev->dev_sectors / 2);
4702 }
4703
4704 static int update_size(struct mddev *mddev, sector_t num_sectors);
4705
4706 static ssize_t
4707 size_store(struct mddev *mddev, const char *buf, size_t len)
4708 {
4709         /* If array is inactive, we can reduce the component size, but
4710          * not increase it (except from 0).
4711          * If array is active, we can try an on-line resize
4712          */
4713         sector_t sectors;
4714         int err = strict_blocks_to_sectors(buf, &sectors);
4715
4716         if (err < 0)
4717                 return err;
4718         err = mddev_lock(mddev);
4719         if (err)
4720                 return err;
4721         if (mddev->pers) {
4722                 err = update_size(mddev, sectors);
4723                 if (err == 0)
4724                         md_update_sb(mddev, 1);
4725         } else {
4726                 if (mddev->dev_sectors == 0 ||
4727                     mddev->dev_sectors > sectors)
4728                         mddev->dev_sectors = sectors;
4729                 else
4730                         err = -ENOSPC;
4731         }
4732         mddev_unlock(mddev);
4733         return err ? err : len;
4734 }
4735
4736 static struct md_sysfs_entry md_size =
4737 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
4738
4739 /* Metadata version.
4740  * This is one of
4741  *   'none' for arrays with no metadata (good luck...)
4742  *   'external' for arrays with externally managed metadata,
4743  * or N.M for internally known formats
4744  */
4745 static ssize_t
4746 metadata_show(struct mddev *mddev, char *page)
4747 {
4748         if (mddev->persistent)
4749                 return sprintf(page, "%d.%d\n",
4750                                mddev->major_version, mddev->minor_version);
4751         else if (mddev->external)
4752                 return sprintf(page, "external:%s\n", mddev->metadata_type);
4753         else
4754                 return sprintf(page, "none\n");
4755 }
4756
4757 static ssize_t
4758 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4759 {
4760         int major, minor;
4761         char *e;
4762         int err;
4763         /* Changing the details of 'external' metadata is
4764          * always permitted.  Otherwise there must be
4765          * no devices attached to the array.
4766          */
4767
4768         err = mddev_lock(mddev);
4769         if (err)
4770                 return err;
4771         err = -EBUSY;
4772         if (mddev->external && strncmp(buf, "external:", 9) == 0)
4773                 ;
4774         else if (!list_empty(&mddev->disks))
4775                 goto out_unlock;
4776
4777         err = 0;
4778         if (cmd_match(buf, "none")) {
4779                 mddev->persistent = 0;
4780                 mddev->external = 0;
4781                 mddev->major_version = 0;
4782                 mddev->minor_version = 90;
4783                 goto out_unlock;
4784         }
4785         if (strncmp(buf, "external:", 9) == 0) {
4786                 size_t namelen = len-9;
4787                 if (namelen >= sizeof(mddev->metadata_type))
4788                         namelen = sizeof(mddev->metadata_type)-1;
4789                 strncpy(mddev->metadata_type, buf+9, namelen);
4790                 mddev->metadata_type[namelen] = 0;
4791                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4792                         mddev->metadata_type[--namelen] = 0;
4793                 mddev->persistent = 0;
4794                 mddev->external = 1;
4795                 mddev->major_version = 0;
4796                 mddev->minor_version = 90;
4797                 goto out_unlock;
4798         }
4799         major = simple_strtoul(buf, &e, 10);
4800         err = -EINVAL;
4801         if (e==buf || *e != '.')
4802                 goto out_unlock;
4803         buf = e+1;
4804         minor = simple_strtoul(buf, &e, 10);
4805         if (e==buf || (*e && *e != '\n') )
4806                 goto out_unlock;
4807         err = -ENOENT;
4808         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4809                 goto out_unlock;
4810         mddev->major_version = major;
4811         mddev->minor_version = minor;
4812         mddev->persistent = 1;
4813         mddev->external = 0;
4814         err = 0;
4815 out_unlock:
4816         mddev_unlock(mddev);
4817         return err ?: len;
4818 }
4819
4820 static struct md_sysfs_entry md_metadata =
4821 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
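
/*
 * Usage sketch, assuming an array at /dev/md0 with no member devices yet
 * (only "external:" types may be changed later):
 *
 *     echo 1.2           > /sys/block/md0/md/metadata_version
 *     echo external:imsm > /sys/block/md0/md/metadata_version
 *     echo none          > /sys/block/md0/md/metadata_version
 *
 * "imsm" here is just an example of an externally managed format name.
 */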
4822
4823 static ssize_t
4824 action_show(struct mddev *mddev, char *page)
4825 {
4826         char *type = "idle";
4827         unsigned long recovery = mddev->recovery;
4828         if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4829                 type = "frozen";
4830         else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4831             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4832                 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4833                         type = "reshape";
4834                 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4835                         if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4836                                 type = "resync";
4837                         else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4838                                 type = "check";
4839                         else
4840                                 type = "repair";
4841                 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4842                         type = "recover";
4843                 else if (mddev->reshape_position != MaxSector)
4844                         type = "reshape";
4845         }
4846         return sprintf(page, "%s\n", type);
4847 }
4848
4849 static ssize_t
4850 action_store(struct mddev *mddev, const char *page, size_t len)
4851 {
4852         if (!mddev->pers || !mddev->pers->sync_request)
4853                 return -EINVAL;
4854
4855
4856         if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4857                 if (cmd_match(page, "frozen"))
4858                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4859                 else
4860                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4861                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4862                     mddev_lock(mddev) == 0) {
4863                         if (work_pending(&mddev->del_work))
4864                                 flush_workqueue(md_misc_wq);
4865                         if (mddev->sync_thread) {
4866                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4867                                 md_reap_sync_thread(mddev);
4868                         }
4869                         mddev_unlock(mddev);
4870                 }
4871         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4872                 return -EBUSY;
4873         else if (cmd_match(page, "resync"))
4874                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4875         else if (cmd_match(page, "recover")) {
4876                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4877                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4878         } else if (cmd_match(page, "reshape")) {
4879                 int err;
4880                 if (mddev->pers->start_reshape == NULL)
4881                         return -EINVAL;
4882                 err = mddev_lock(mddev);
4883                 if (!err) {
4884                         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4885                                 err = -EBUSY;
4886                         else {
4887                                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4888                                 err = mddev->pers->start_reshape(mddev);
4889                         }
4890                         mddev_unlock(mddev);
4891                 }
4892                 if (err)
4893                         return err;
4894                 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
4895         } else {
4896                 if (cmd_match(page, "check"))
4897                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4898                 else if (!cmd_match(page, "repair"))
4899                         return -EINVAL;
4900                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4901                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4902                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4903         }
4904         if (mddev->ro == 2) {
4905                 /* A write to sync_action is enough to justify
4906                  * canceling read-auto mode
4907                  */
4908                 mddev->ro = 0;
4909                 md_wakeup_thread(mddev->sync_thread);
4910         }
4911         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4912         md_wakeup_thread(mddev->thread);
4913         sysfs_notify_dirent_safe(mddev->sysfs_action);
4914         return len;
4915 }
4916
4917 static struct md_sysfs_entry md_scan_mode =
4918 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
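
/*
 * Usage sketch, assuming an array at /dev/md0 with a redundancy
 * personality: start a read-only scrub and later freeze recovery:
 *
 *     echo check  > /sys/block/md0/md/sync_action
 *     cat /sys/block/md0/md/sync_action            # "check" while running
 *     echo frozen > /sys/block/md0/md/sync_action
 */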
4919
4920 static ssize_t
4921 last_sync_action_show(struct mddev *mddev, char *page)
4922 {
4923         return sprintf(page, "%s\n", mddev->last_sync_action);
4924 }
4925
4926 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4927
4928 static ssize_t
4929 mismatch_cnt_show(struct mddev *mddev, char *page)
4930 {
4931         return sprintf(page, "%llu\n",
4932                        (unsigned long long)
4933                        atomic64_read(&mddev->resync_mismatches));
4934 }
4935
4936 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4937
4938 static ssize_t
4939 sync_min_show(struct mddev *mddev, char *page)
4940 {
4941         return sprintf(page, "%d (%s)\n", speed_min(mddev),
4942                        mddev->sync_speed_min ? "local": "system");
4943 }
4944
4945 static ssize_t
4946 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4947 {
4948         unsigned int min;
4949         int rv;
4950
4951         if (strncmp(buf, "system", 6)==0) {
4952                 min = 0;
4953         } else {
4954                 rv = kstrtouint(buf, 10, &min);
4955                 if (rv < 0)
4956                         return rv;
4957                 if (min == 0)
4958                         return -EINVAL;
4959         }
4960         mddev->sync_speed_min = min;
4961         return len;
4962 }
4963
4964 static struct md_sysfs_entry md_sync_min =
4965 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4966
4967 static ssize_t
4968 sync_max_show(struct mddev *mddev, char *page)
4969 {
4970         return sprintf(page, "%d (%s)\n", speed_max(mddev),
4971                        mddev->sync_speed_max ? "local": "system");
4972 }
4973
4974 static ssize_t
4975 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4976 {
4977         unsigned int max;
4978         int rv;
4979
4980         if (strncmp(buf, "system", 6)==0) {
4981                 max = 0;
4982         } else {
4983                 rv = kstrtouint(buf, 10, &max);
4984                 if (rv < 0)
4985                         return rv;
4986                 if (max == 0)
4987                         return -EINVAL;
4988         }
4989         mddev->sync_speed_max = max;
4990         return len;
4991 }
4992
4993 static struct md_sysfs_entry md_sync_max =
4994 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
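
/*
 * Usage sketch, assuming an array at /dev/md0: cap the per-device resync
 * rate at roughly 50 MB/s (the value is in KB/sec), then fall back to the
 * system-wide limit:
 *
 *     echo 50000  > /sys/block/md0/md/sync_speed_max
 *     echo system > /sys/block/md0/md/sync_speed_max
 */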
4995
4996 static ssize_t
4997 degraded_show(struct mddev *mddev, char *page)
4998 {
4999         return sprintf(page, "%d\n", mddev->degraded);
5000 }
5001 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
5002
5003 static ssize_t
5004 sync_force_parallel_show(struct mddev *mddev, char *page)
5005 {
5006         return sprintf(page, "%d\n", mddev->parallel_resync);
5007 }
5008
5009 static ssize_t
5010 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
5011 {
5012         long n;
5013
5014         if (kstrtol(buf, 10, &n))
5015                 return -EINVAL;
5016
5017         if (n != 0 && n != 1)
5018                 return -EINVAL;
5019
5020         mddev->parallel_resync = n;
5021
5022         if (mddev->sync_thread)
5023                 wake_up(&resync_wait);
5024
5025         return len;
5026 }
5027
5028 /* force parallel resync, even with shared block devices */
5029 static struct md_sysfs_entry md_sync_force_parallel =
5030 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
5031        sync_force_parallel_show, sync_force_parallel_store);
5032
5033 static ssize_t
5034 sync_speed_show(struct mddev *mddev, char *page)
5035 {
5036         unsigned long resync, dt, db;
5037         if (mddev->curr_resync == 0)
5038                 return sprintf(page, "none\n");
5039         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
5040         dt = (jiffies - mddev->resync_mark) / HZ;
5041         if (!dt) dt++;
5042         db = resync - mddev->resync_mark_cnt;
5043         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
5044 }
5045
5046 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
5047
5048 static ssize_t
5049 sync_completed_show(struct mddev *mddev, char *page)
5050 {
5051         unsigned long long max_sectors, resync;
5052
5053         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5054                 return sprintf(page, "none\n");
5055
5056         if (mddev->curr_resync == 1 ||
5057             mddev->curr_resync == 2)
5058                 return sprintf(page, "delayed\n");
5059
5060         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
5061             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5062                 max_sectors = mddev->resync_max_sectors;
5063         else
5064                 max_sectors = mddev->dev_sectors;
5065
5066         resync = mddev->curr_resync_completed;
5067         return sprintf(page, "%llu / %llu\n", resync, max_sectors);
5068 }
5069
5070 static struct md_sysfs_entry md_sync_completed =
5071         __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
5072
5073 static ssize_t
5074 min_sync_show(struct mddev *mddev, char *page)
5075 {
5076         return sprintf(page, "%llu\n",
5077                        (unsigned long long)mddev->resync_min);
5078 }
5079 static ssize_t
5080 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
5081 {
5082         unsigned long long min;
5083         int err;
5084
5085         if (kstrtoull(buf, 10, &min))
5086                 return -EINVAL;
5087
5088         spin_lock(&mddev->lock);
5089         err = -EINVAL;
5090         if (min > mddev->resync_max)
5091                 goto out_unlock;
5092
5093         err = -EBUSY;
5094         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5095                 goto out_unlock;
5096
5097         /* Round down to a multiple of 4K (8 sectors) for safety */
5098         mddev->resync_min = round_down(min, 8);
5099         err = 0;
5100
5101 out_unlock:
5102         spin_unlock(&mddev->lock);
5103         return err ?: len;
5104 }
5105
5106 static struct md_sysfs_entry md_min_sync =
5107 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
5108
5109 static ssize_t
5110 max_sync_show(struct mddev *mddev, char *page)
5111 {
5112         if (mddev->resync_max == MaxSector)
5113                 return sprintf(page, "max\n");
5114         else
5115                 return sprintf(page, "%llu\n",
5116                                (unsigned long long)mddev->resync_max);
5117 }
5118 static ssize_t
5119 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
5120 {
5121         int err;
5122         spin_lock(&mddev->lock);
5123         if (strncmp(buf, "max", 3) == 0)
5124                 mddev->resync_max = MaxSector;
5125         else {
5126                 unsigned long long max;
5127                 int chunk;
5128
5129                 err = -EINVAL;
5130                 if (kstrtoull(buf, 10, &max))
5131                         goto out_unlock;
5132                 if (max < mddev->resync_min)
5133                         goto out_unlock;
5134
5135                 err = -EBUSY;
5136                 if (max < mddev->resync_max &&
5137                     mddev->ro == 0 &&
5138                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5139                         goto out_unlock;
5140
5141                 /* Must be a multiple of chunk_size */
5142                 chunk = mddev->chunk_sectors;
5143                 if (chunk) {
5144                         sector_t temp = max;
5145
5146                         err = -EINVAL;
5147                         if (sector_div(temp, chunk))
5148                                 goto out_unlock;
5149                 }
5150                 mddev->resync_max = max;
5151         }
5152         wake_up(&mddev->recovery_wait);
5153         err = 0;
5154 out_unlock:
5155         spin_unlock(&mddev->lock);
5156         return err ?: len;
5157 }
5158
5159 static struct md_sysfs_entry md_max_sync =
5160 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
5161
5162 static ssize_t
5163 suspend_lo_show(struct mddev *mddev, char *page)
5164 {
5165         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
5166 }
5167
5168 static ssize_t
5169 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
5170 {
5171         unsigned long long new;
5172         int err;
5173
5174         err = kstrtoull(buf, 10, &new);
5175         if (err < 0)
5176                 return err;
5177         if (new != (sector_t)new)
5178                 return -EINVAL;
5179
5180         err = mddev_lock(mddev);
5181         if (err)
5182                 return err;
5183         err = -EINVAL;
5184         if (mddev->pers == NULL ||
5185             mddev->pers->quiesce == NULL)
5186                 goto unlock;
5187         mddev_suspend(mddev);
5188         mddev->suspend_lo = new;
5189         mddev_resume(mddev);
5190
5191         err = 0;
5192 unlock:
5193         mddev_unlock(mddev);
5194         return err ?: len;
5195 }
5196 static struct md_sysfs_entry md_suspend_lo =
5197 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5198
5199 static ssize_t
5200 suspend_hi_show(struct mddev *mddev, char *page)
5201 {
5202         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
5203 }
5204
5205 static ssize_t
5206 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
5207 {
5208         unsigned long long new;
5209         int err;
5210
5211         err = kstrtoull(buf, 10, &new);
5212         if (err < 0)
5213                 return err;
5214         if (new != (sector_t)new)
5215                 return -EINVAL;
5216
5217         err = mddev_lock(mddev);
5218         if (err)
5219                 return err;
5220         err = -EINVAL;
5221         if (mddev->pers == NULL)
5222                 goto unlock;
5223
5224         mddev_suspend(mddev);
5225         mddev->suspend_hi = new;
5226         mddev_resume(mddev);
5227
5228         err = 0;
5229 unlock:
5230         mddev_unlock(mddev);
5231         return err ?: len;
5232 }
5233 static struct md_sysfs_entry md_suspend_hi =
5234 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
5235
5236 static ssize_t
5237 reshape_position_show(struct mddev *mddev, char *page)
5238 {
5239         if (mddev->reshape_position != MaxSector)
5240                 return sprintf(page, "%llu\n",
5241                                (unsigned long long)mddev->reshape_position);
5242         strcpy(page, "none\n");
5243         return 5;
5244 }
5245
5246 static ssize_t
5247 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
5248 {
5249         struct md_rdev *rdev;
5250         unsigned long long new;
5251         int err;
5252
5253         err = kstrtoull(buf, 10, &new);
5254         if (err < 0)
5255                 return err;
5256         if (new != (sector_t)new)
5257                 return -EINVAL;
5258         err = mddev_lock(mddev);
5259         if (err)
5260                 return err;
5261         err = -EBUSY;
5262         if (mddev->pers)
5263                 goto unlock;
5264         mddev->reshape_position = new;
5265         mddev->delta_disks = 0;
5266         mddev->reshape_backwards = 0;
5267         mddev->new_level = mddev->level;
5268         mddev->new_layout = mddev->layout;
5269         mddev->new_chunk_sectors = mddev->chunk_sectors;
5270         rdev_for_each(rdev, mddev)
5271                 rdev->new_data_offset = rdev->data_offset;
5272         err = 0;
5273 unlock:
5274         mddev_unlock(mddev);
5275         return err ?: len;
5276 }
5277
5278 static struct md_sysfs_entry md_reshape_position =
5279 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5280        reshape_position_store);
5281
5282 static ssize_t
5283 reshape_direction_show(struct mddev *mddev, char *page)
5284 {
5285         return sprintf(page, "%s\n",
5286                        mddev->reshape_backwards ? "backwards" : "forwards");
5287 }
5288
5289 static ssize_t
5290 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5291 {
5292         int backwards = 0;
5293         int err;
5294
5295         if (cmd_match(buf, "forwards"))
5296                 backwards = 0;
5297         else if (cmd_match(buf, "backwards"))
5298                 backwards = 1;
5299         else
5300                 return -EINVAL;
5301         if (mddev->reshape_backwards == backwards)
5302                 return len;
5303
5304         err = mddev_lock(mddev);
5305         if (err)
5306                 return err;
5307         /* check if we are allowed to change */
5308         if (mddev->delta_disks)
5309                 err = -EBUSY;
5310         else if (mddev->persistent &&
5311             mddev->major_version == 0)
5312                 err = -EINVAL;
5313         else
5314                 mddev->reshape_backwards = backwards;
5315         mddev_unlock(mddev);
5316         return err ?: len;
5317 }
5318
5319 static struct md_sysfs_entry md_reshape_direction =
5320 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5321        reshape_direction_store);
5322
5323 static ssize_t
5324 array_size_show(struct mddev *mddev, char *page)
5325 {
5326         if (mddev->external_size)
5327                 return sprintf(page, "%llu\n",
5328                                (unsigned long long)mddev->array_sectors/2);
5329         else
5330                 return sprintf(page, "default\n");
5331 }
5332
5333 static ssize_t
5334 array_size_store(struct mddev *mddev, const char *buf, size_t len)
5335 {
5336         sector_t sectors;
5337         int err;
5338
5339         err = mddev_lock(mddev);
5340         if (err)
5341                 return err;
5342
5343         /* cluster raid doesn't support changing array_sectors */
5344         if (mddev_is_clustered(mddev)) {
5345                 mddev_unlock(mddev);
5346                 return -EINVAL;
5347         }
5348
5349         if (strncmp(buf, "default", 7) == 0) {
5350                 if (mddev->pers)
5351                         sectors = mddev->pers->size(mddev, 0, 0);
5352                 else
5353                         sectors = mddev->array_sectors;
5354
5355                 mddev->external_size = 0;
5356         } else {
5357                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
5358                         err = -EINVAL;
5359                 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5360                         err = -E2BIG;
5361                 else
5362                         mddev->external_size = 1;
5363         }
5364
5365         if (!err) {
5366                 mddev->array_sectors = sectors;
5367                 if (mddev->pers) {
5368                         set_capacity(mddev->gendisk, mddev->array_sectors);
5369                         revalidate_disk_size(mddev->gendisk, true);
5370                 }
5371         }
5372         mddev_unlock(mddev);
5373         return err ?: len;
5374 }
5375
5376 static struct md_sysfs_entry md_array_size =
5377 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5378        array_size_store);
5379
5380 static ssize_t
5381 consistency_policy_show(struct mddev *mddev, char *page)
5382 {
5383         int ret;
5384
5385         if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5386                 ret = sprintf(page, "journal\n");
5387         } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5388                 ret = sprintf(page, "ppl\n");
5389         } else if (mddev->bitmap) {
5390                 ret = sprintf(page, "bitmap\n");
5391         } else if (mddev->pers) {
5392                 if (mddev->pers->sync_request)
5393                         ret = sprintf(page, "resync\n");
5394                 else
5395                         ret = sprintf(page, "none\n");
5396         } else {
5397                 ret = sprintf(page, "unknown\n");
5398         }
5399
5400         return ret;
5401 }
5402
5403 static ssize_t
5404 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5405 {
5406         int err = 0;
5407
5408         if (mddev->pers) {
5409                 if (mddev->pers->change_consistency_policy)
5410                         err = mddev->pers->change_consistency_policy(mddev, buf);
5411                 else
5412                         err = -EBUSY;
5413         } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5414                 set_bit(MD_HAS_PPL, &mddev->flags);
5415         } else {
5416                 err = -EINVAL;
5417         }
5418
5419         return err ? err : len;
5420 }
5421
5422 static struct md_sysfs_entry md_consistency_policy =
5423 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5424        consistency_policy_store);
5425
5426 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5427 {
5428         return sprintf(page, "%d\n", mddev->fail_last_dev);
5429 }
5430
5431 /*
5432  * Setting fail_last_dev to true allows the last device to be forcibly
5433  * removed from RAID1/RAID10.
5434  */
5435 static ssize_t
5436 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5437 {
5438         int ret;
5439         bool value;
5440
5441         ret = kstrtobool(buf, &value);
5442         if (ret)
5443                 return ret;
5444
5445         if (value != mddev->fail_last_dev)
5446                 mddev->fail_last_dev = value;
5447
5448         return len;
5449 }
5450 static struct md_sysfs_entry md_fail_last_dev =
5451 __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5452        fail_last_dev_store);
5453
5454 static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
5455 {
5456         if (mddev->pers == NULL || (mddev->pers->level != 1))
5457                 return sprintf(page, "n/a\n");
5458         else
5459                 return sprintf(page, "%d\n", mddev->serialize_policy);
5460 }
5461
5462 /*
5463  * Setting serialize_policy to true enforces that write IO is not
5464  * reordered for raid1.
5465  */
5466 static ssize_t
5467 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
5468 {
5469         int err;
5470         bool value;
5471
5472         err = kstrtobool(buf, &value);
5473         if (err)
5474                 return err;
5475
5476         if (value == mddev->serialize_policy)
5477                 return len;
5478
5479         err = mddev_lock(mddev);
5480         if (err)
5481                 return err;
5482         if (mddev->pers == NULL || (mddev->pers->level != 1)) {
5483                 pr_err("md: serialize_policy is only effective for raid1\n");
5484                 err = -EINVAL;
5485                 goto unlock;
5486         }
5487
5488         mddev_suspend(mddev);
5489         if (value)
5490                 mddev_create_serial_pool(mddev, NULL, true);
5491         else
5492                 mddev_destroy_serial_pool(mddev, NULL, true);
5493         mddev->serialize_policy = value;
5494         mddev_resume(mddev);
5495 unlock:
5496         mddev_unlock(mddev);
5497         return err ?: len;
5498 }
5499
5500 static struct md_sysfs_entry md_serialize_policy =
5501 __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
5502        serialize_policy_store);
5503
5504
5505 static struct attribute *md_default_attrs[] = {
5506         &md_level.attr,
5507         &md_layout.attr,
5508         &md_raid_disks.attr,
5509         &md_uuid.attr,
5510         &md_chunk_size.attr,
5511         &md_size.attr,
5512         &md_resync_start.attr,
5513         &md_metadata.attr,
5514         &md_new_device.attr,
5515         &md_safe_delay.attr,
5516         &md_array_state.attr,
5517         &md_reshape_position.attr,
5518         &md_reshape_direction.attr,
5519         &md_array_size.attr,
5520         &max_corr_read_errors.attr,
5521         &md_consistency_policy.attr,
5522         &md_fail_last_dev.attr,
5523         &md_serialize_policy.attr,
5524         NULL,
5525 };
5526
5527 static struct attribute *md_redundancy_attrs[] = {
5528         &md_scan_mode.attr,
5529         &md_last_scan_mode.attr,
5530         &md_mismatches.attr,
5531         &md_sync_min.attr,
5532         &md_sync_max.attr,
5533         &md_sync_speed.attr,
5534         &md_sync_force_parallel.attr,
5535         &md_sync_completed.attr,
5536         &md_min_sync.attr,
5537         &md_max_sync.attr,
5538         &md_suspend_lo.attr,
5539         &md_suspend_hi.attr,
5540         &md_bitmap.attr,
5541         &md_degraded.attr,
5542         NULL,
5543 };
5544 static struct attribute_group md_redundancy_group = {
5545         .name = NULL,
5546         .attrs = md_redundancy_attrs,
5547 };
5548
5549 static ssize_t
5550 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5551 {
5552         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5553         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5554         ssize_t rv;
5555
5556         if (!entry->show)
5557                 return -EIO;
5558         spin_lock(&all_mddevs_lock);
5559         if (list_empty(&mddev->all_mddevs)) {
5560                 spin_unlock(&all_mddevs_lock);
5561                 return -EBUSY;
5562         }
5563         mddev_get(mddev);
5564         spin_unlock(&all_mddevs_lock);
5565
5566         rv = entry->show(mddev, page);
5567         mddev_put(mddev);
5568         return rv;
5569 }
5570
5571 static ssize_t
5572 md_attr_store(struct kobject *kobj, struct attribute *attr,
5573               const char *page, size_t length)
5574 {
5575         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5576         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5577         ssize_t rv;
5578
5579         if (!entry->store)
5580                 return -EIO;
5581         if (!capable(CAP_SYS_ADMIN))
5582                 return -EACCES;
5583         spin_lock(&all_mddevs_lock);
5584         if (list_empty(&mddev->all_mddevs)) {
5585                 spin_unlock(&all_mddevs_lock);
5586                 return -EBUSY;
5587         }
5588         mddev_get(mddev);
5589         spin_unlock(&all_mddevs_lock);
5590         rv = entry->store(mddev, page, length);
5591         mddev_put(mddev);
5592         return rv;
5593 }
5594
5595 static void md_free(struct kobject *ko)
5596 {
5597         struct mddev *mddev = container_of(ko, struct mddev, kobj);
5598
5599         if (mddev->sysfs_state)
5600                 sysfs_put(mddev->sysfs_state);
5601         if (mddev->sysfs_level)
5602                 sysfs_put(mddev->sysfs_level);
5603
5604         if (mddev->gendisk)
5605                 del_gendisk(mddev->gendisk);
5606         if (mddev->queue)
5607                 blk_cleanup_queue(mddev->queue);
5608         if (mddev->gendisk)
5609                 put_disk(mddev->gendisk);
5610         percpu_ref_exit(&mddev->writes_pending);
5611
5612         bioset_exit(&mddev->bio_set);
5613         bioset_exit(&mddev->sync_set);
5614         kfree(mddev);
5615 }
5616
5617 static const struct sysfs_ops md_sysfs_ops = {
5618         .show   = md_attr_show,
5619         .store  = md_attr_store,
5620 };
5621 static struct kobj_type md_ktype = {
5622         .release        = md_free,
5623         .sysfs_ops      = &md_sysfs_ops,
5624         .default_attrs  = md_default_attrs,
5625 };
5626
5627 int mdp_major = 0;
5628
5629 static void mddev_delayed_delete(struct work_struct *ws)
5630 {
5631         struct mddev *mddev = container_of(ws, struct mddev, del_work);
5632
5633         sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
5634         kobject_del(&mddev->kobj);
5635         kobject_put(&mddev->kobj);
5636 }
5637
5638 static void no_op(struct percpu_ref *r) {}
5639
5640 int mddev_init_writes_pending(struct mddev *mddev)
5641 {
5642         if (mddev->writes_pending.percpu_count_ptr)
5643                 return 0;
5644         if (percpu_ref_init(&mddev->writes_pending, no_op,
5645                             PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
5646                 return -ENOMEM;
5647         /* We want to start with the refcount at zero */
5648         percpu_ref_put(&mddev->writes_pending);
5649         return 0;
5650 }
5651 EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
5652
5653 static int md_alloc(dev_t dev, char *name)
5654 {
5655         /*
5656          * If dev is zero, name is the name of a device to allocate with
5657          * an arbitrary minor number.  It will be "md_???"
5658          * If dev is non-zero it must be a device number with a MAJOR of
5659          * MD_MAJOR or mdp_major.  In this case, if "name" is NULL, then
5660          * the device is being created by opening a node in /dev.
5661          * If "name" is not NULL, the device is being created by
5662          * writing to /sys/module/md_mod/parameters/new_array.
5663          */
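        /*
         * For example (names hypothetical): opening /dev/md3 reaches this
         * function via md_probe() as md_alloc(MKDEV(MD_MAJOR, 3), NULL),
         * while writes to /sys/module/md_mod/parameters/new_array arrive
         * here via add_named_array() below.
         */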
5664         static DEFINE_MUTEX(disks_mutex);
5665         struct mddev *mddev = mddev_find_or_alloc(dev);
5666         struct gendisk *disk;
5667         int partitioned;
5668         int shift;
5669         int unit;
5670         int error;
5671
5672         if (!mddev)
5673                 return -ENODEV;
5674
5675         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5676         shift = partitioned ? MdpMinorShift : 0;
5677         unit = MINOR(mddev->unit) >> shift;
5678
5679         /* wait for any previous instance of this device to be
5680          * completely removed (mddev_delayed_delete).
5681          */
5682         flush_workqueue(md_misc_wq);
5683
5684         mutex_lock(&disks_mutex);
5685         error = -EEXIST;
5686         if (mddev->gendisk)
5687                 goto abort;
5688
5689         if (name && !dev) {
5690                 /* Need to ensure that 'name' is not a duplicate.
5691                  */
5692                 struct mddev *mddev2;
5693                 spin_lock(&all_mddevs_lock);
5694
5695                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5696                         if (mddev2->gendisk &&
5697                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
5698                                 spin_unlock(&all_mddevs_lock);
5699                                 goto abort;
5700                         }
5701                 spin_unlock(&all_mddevs_lock);
5702         }
5703         if (name && dev)
5704                 /*
5705                  * Creating /dev/mdNNN via "new_array", so adjust hold_active.
5706                  */
5707                 mddev->hold_active = UNTIL_STOP;
5708
5709         error = -ENOMEM;
5710         mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
5711         if (!mddev->queue)
5712                 goto abort;
5713
5714         blk_set_stacking_limits(&mddev->queue->limits);
5715
5716         disk = alloc_disk(1 << shift);
5717         if (!disk) {
5718                 blk_cleanup_queue(mddev->queue);
5719                 mddev->queue = NULL;
5720                 goto abort;
5721         }
5722         disk->major = MAJOR(mddev->unit);
5723         disk->first_minor = unit << shift;
5724         if (name)
5725                 strcpy(disk->disk_name, name);
5726         else if (partitioned)
5727                 sprintf(disk->disk_name, "md_d%d", unit);
5728         else
5729                 sprintf(disk->disk_name, "md%d", unit);
5730         disk->fops = &md_fops;
5731         disk->private_data = mddev;
5732         disk->queue = mddev->queue;
5733         blk_queue_write_cache(mddev->queue, true, true);
5734         /* Allow extended partitions.  This makes the
5735          * 'mdp' device redundant, but we can't really
5736          * remove it now.
5737          */
5738         disk->flags |= GENHD_FL_EXT_DEVT;
5739         disk->events |= DISK_EVENT_MEDIA_CHANGE;
5740         mddev->gendisk = disk;
5741         add_disk(disk);
5742
5743         error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
5744         if (error) {
5745                 /* This isn't possible, but as kobject_add() is marked
5746                  * __must_check, we must do something with the result
5747                  */
5748                 pr_debug("md: cannot register %s/md - name in use\n",
5749                          disk->disk_name);
5750                 error = 0;
5751         }
5752         if (mddev->kobj.sd &&
5753             sysfs_create_group(&mddev->kobj, &md_bitmap_group))
5754                 pr_debug("md: cannot register bitmap group for %s\n", disk->disk_name);
5755  abort:
5756         mutex_unlock(&disks_mutex);
5757         if (!error && mddev->kobj.sd) {
5758                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
5759                 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5760                 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
5761         }
5762         mddev_put(mddev);
5763         return error;
5764 }
5765
5766 static struct kobject *md_probe(dev_t dev, int *part, void *data)
5767 {
5768         if (create_on_open)
5769                 md_alloc(dev, NULL);
5770         return NULL;
5771 }
5772
5773 static int add_named_array(const char *val, const struct kernel_param *kp)
5774 {
5775         /*
5776          * val must be "md_*" or "mdNNN".
5777          * For "md_*" we allocate an array with a large free minor number, and
5778          * set the name to val.  val must not already be an active name.
5779          * For "mdNNN" we allocate an array with the minor number NNN
5780          * which must not already be in use.
5781          */
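        /*
         * For example (names hypothetical):
         *   echo md_home > /sys/module/md_mod/parameters/new_array
         * creates an array named "md_home", while "echo md12 > ..." creates
         * the md device with minor 12, provided neither is already in use.
         */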
5782         int len = strlen(val);
5783         char buf[DISK_NAME_LEN];
5784         unsigned long devnum;
5785
5786         while (len && val[len-1] == '\n')
5787                 len--;
5788         if (len >= DISK_NAME_LEN)
5789                 return -E2BIG;
5790         strlcpy(buf, val, len+1);
5791         if (strncmp(buf, "md_", 3) == 0)
5792                 return md_alloc(0, buf);
5793         if (strncmp(buf, "md", 2) == 0 &&
5794             isdigit(buf[2]) &&
5795             kstrtoul(buf+2, 10, &devnum) == 0 &&
5796             devnum <= MINORMASK)
5797                 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
5798
5799         return -EINVAL;
5800 }
5801
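/*
 * Safemode: once safemode_delay passes with no write activity this timer
 * fires, sets ->safemode and wakes the md thread so the array can be marked
 * clean (in_sync) in the superblock until the next write arrives.
 */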
5802 static void md_safemode_timeout(struct timer_list *t)
5803 {
5804         struct mddev *mddev = from_timer(mddev, t, safemode_timer);
5805
5806         mddev->safemode = 1;
5807         if (mddev->external)
5808                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5809
5810         md_wakeup_thread(mddev->thread);
5811 }
5812
5813 static int start_dirty_degraded;
5814
5815 int md_run(struct mddev *mddev)
5816 {
5817         int err;
5818         struct md_rdev *rdev;
5819         struct md_personality *pers;
5820
5821         if (list_empty(&mddev->disks))
5822                 /* cannot run an array with no devices. */
5823                 return -EINVAL;
5824
5825         if (mddev->pers)
5826                 return -EBUSY;
5827         /* Cannot run until previous stop completes properly */
5828         if (mddev->sysfs_active)
5829                 return -EBUSY;
5830
5831         /*
5832          * Analyze all RAID superblock(s)
5833          */
5834         if (!mddev->raid_disks) {
5835                 if (!mddev->persistent)
5836                         return -EINVAL;
5837                 err = analyze_sbs(mddev);
5838                 if (err)
5839                         return -EINVAL;
5840         }
5841
5842         if (mddev->level != LEVEL_NONE)
5843                 request_module("md-level-%d", mddev->level);
5844         else if (mddev->clevel[0])
5845                 request_module("md-%s", mddev->clevel);
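        /* e.g. level 1 resolves to raid1.ko through its "md-level-1"
         * (or "md-raid1") module alias
         */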
5846
5847         /*
5848          * Drop all container device buffers, from now on
5849          * the only valid external interface is through the md
5850          * device.
5851          */
5852         mddev->has_superblocks = false;
5853         rdev_for_each(rdev, mddev) {
5854                 if (test_bit(Faulty, &rdev->flags))
5855                         continue;
5856                 sync_blockdev(rdev->bdev);
5857                 invalidate_bdev(rdev->bdev);
5858                 if (mddev->ro != 1 &&
5859                     (bdev_read_only(rdev->bdev) ||
5860                      bdev_read_only(rdev->meta_bdev))) {
5861                         mddev->ro = 1;
5862                         if (mddev->gendisk)
5863                                 set_disk_ro(mddev->gendisk, 1);
5864                 }
5865
5866                 if (rdev->sb_page)
5867                         mddev->has_superblocks = true;
5868
5869                 /* perform some consistency tests on the device.
5870                  * We don't want the data to overlap the metadata.
5871                  * Internal bitmap issues have been handled elsewhere.
5872                  */
5873                 if (rdev->meta_bdev) {
5874                         /* Nothing to check */;
5875                 } else if (rdev->data_offset < rdev->sb_start) {
5876                         if (mddev->dev_sectors &&
5877                             rdev->data_offset + mddev->dev_sectors
5878                             > rdev->sb_start) {
5879                                 pr_warn("md: %s: data overlaps metadata\n",
5880                                         mdname(mddev));
5881                                 return -EINVAL;
5882                         }
5883                 } else {
5884                         if (rdev->sb_start + rdev->sb_size/512
5885                             > rdev->data_offset) {
5886                                 pr_warn("md: %s: metadata overlaps data\n",
5887                                         mdname(mddev));
5888                                 return -EINVAL;
5889                         }
5890                 }
5891                 sysfs_notify_dirent_safe(rdev->sysfs_state);
5892         }
5893
5894         if (!bioset_initialized(&mddev->bio_set)) {
5895                 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5896                 if (err)
5897                         return err;
5898         }
5899         if (!bioset_initialized(&mddev->sync_set)) {
5900                 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5901                 if (err)
5902                         return err;
5903         }
5904
5905         spin_lock(&pers_lock);
5906         pers = find_pers(mddev->level, mddev->clevel);
5907         if (!pers || !try_module_get(pers->owner)) {
5908                 spin_unlock(&pers_lock);
5909                 if (mddev->level != LEVEL_NONE)
5910                         pr_warn("md: personality for level %d is not loaded!\n",
5911                                 mddev->level);
5912                 else
5913                         pr_warn("md: personality for level %s is not loaded!\n",
5914                                 mddev->clevel);
5915                 err = -EINVAL;
5916                 goto abort;
5917         }
5918         spin_unlock(&pers_lock);
5919         if (mddev->level != pers->level) {
5920                 mddev->level = pers->level;
5921                 mddev->new_level = pers->level;
5922         }
5923         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5924
5925         if (mddev->reshape_position != MaxSector &&
5926             pers->start_reshape == NULL) {
5927                 /* This personality cannot handle reshaping... */
5928                 module_put(pers->owner);
5929                 err = -EINVAL;
5930                 goto abort;
5931         }
5932
5933         if (pers->sync_request) {
5934                 /* Warn if this is a potentially silly
5935                  * configuration.
5936                  */
5937                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5938                 struct md_rdev *rdev2;
5939                 int warned = 0;
5940
5941                 rdev_for_each(rdev, mddev)
5942                         rdev_for_each(rdev2, mddev) {
5943                                 if (rdev < rdev2 &&
5944                                     rdev->bdev->bd_disk ==
5945                                     rdev2->bdev->bd_disk) {
5946                                         pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5947                                                 mdname(mddev),
5948                                                 bdevname(rdev->bdev,b),
5949                                                 bdevname(rdev2->bdev,b2));
5950                                         warned = 1;
5951                                 }
5952                         }
5953
5954                 if (warned)
5955                         pr_warn("True protection against single-disk failure might be compromised.\n");
5956         }
5957
5958         mddev->recovery = 0;
5959         /* may be overridden by the personality */
5960         mddev->resync_max_sectors = mddev->dev_sectors;
5961
5962         mddev->ok_start_degraded = start_dirty_degraded;
5963
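        /* mddev->ro: 0 = read-write, 1 = read-only,
         * 2 = read-auto (read-only until the first write, as set below)
         */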
5964         if (start_readonly && mddev->ro == 0)
5965                 mddev->ro = 2; /* read-only, but switch on first write */
5966
5967         err = pers->run(mddev);
5968         if (err)
5969                 pr_warn("md: pers->run() failed ...\n");
5970         else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5971                 WARN_ONCE(!mddev->external_size,
5972                           "%s: default size too small, but 'external_size' not in effect?\n",
5973                           __func__);
5974                 pr_warn("md: invalid array_size %llu > default size %llu\n",
5975                         (unsigned long long)mddev->array_sectors / 2,
5976                         (unsigned long long)pers->size(mddev, 0, 0) / 2);
5977                 err = -EINVAL;
5978         }
5979         if (err == 0 && pers->sync_request &&
5980             (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5981                 struct bitmap *bitmap;
5982
5983                 bitmap = md_bitmap_create(mddev, -1);
5984                 if (IS_ERR(bitmap)) {
5985                         err = PTR_ERR(bitmap);
5986                         pr_warn("%s: failed to create bitmap (%d)\n",
5987                                 mdname(mddev), err);
5988                 } else
5989                         mddev->bitmap = bitmap;
5990
5991         }
5992         if (err)
5993                 goto bitmap_abort;
5994
5995         if (mddev->bitmap_info.max_write_behind > 0) {
5996                 bool create_pool = false;
5997
5998                 rdev_for_each(rdev, mddev) {
5999                         if (test_bit(WriteMostly, &rdev->flags) &&
6000                             rdev_init_serial(rdev))
6001                                 create_pool = true;
6002                 }
6003                 if (create_pool && mddev->serial_info_pool == NULL) {
6004                         mddev->serial_info_pool =
6005                                 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
6006                                                     sizeof(struct serial_info));
6007                         if (!mddev->serial_info_pool) {
6008                                 err = -ENOMEM;
6009                                 goto bitmap_abort;
6010                         }
6011                 }
6012         }
6013
6014         if (mddev->queue) {
6015                 bool nonrot = true;
6016
6017                 rdev_for_each(rdev, mddev) {
6018                         if (rdev->raid_disk >= 0 &&
6019                             !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
6020                                 nonrot = false;
6021                                 break;
6022                         }
6023                 }
6024                 if (mddev->degraded)
6025                         nonrot = false;
6026                 if (nonrot)
6027                         blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
6028                 else
6029                         blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
6030         }
6031         if (pers->sync_request) {
6032                 if (mddev->kobj.sd &&
6033                     sysfs_create_group(&mddev->kobj, &md_redundancy_group))
6034                         pr_warn("md: cannot register extra attributes for %s\n",
6035                                 mdname(mddev));
6036                 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
6037                 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
6038                 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
6039         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
6040                 mddev->ro = 0;
6041
6042         atomic_set(&mddev->max_corr_read_errors,
6043                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
6044         mddev->safemode = 0;
6045         if (mddev_is_clustered(mddev))
6046                 mddev->safemode_delay = 0;
6047         else
6048                 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
6049         mddev->in_sync = 1;
6050         smp_wmb();
6051         spin_lock(&mddev->lock);
6052         mddev->pers = pers;
6053         spin_unlock(&mddev->lock);
6054         rdev_for_each(rdev, mddev)
6055                 if (rdev->raid_disk >= 0)
6056                         sysfs_link_rdev(mddev, rdev); /* failure here is OK */
6057
6058         if (mddev->degraded && !mddev->ro)
6059                 /* This ensures that recovering status is reported immediately
6060                  * via sysfs - until a lack of spares is confirmed.
6061                  */
6062                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6063         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6064
6065         if (mddev->sb_flags)
6066                 md_update_sb(mddev, 0);
6067
6068         md_new_event(mddev);
6069         return 0;
6070
6071 bitmap_abort:
6072         mddev_detach(mddev);
6073         if (mddev->private)
6074                 pers->free(mddev, mddev->private);
6075         mddev->private = NULL;
6076         module_put(pers->owner);
6077         md_bitmap_destroy(mddev);
6078 abort:
6079         bioset_exit(&mddev->bio_set);
6080         bioset_exit(&mddev->sync_set);
6081         return err;
6082 }
6083 EXPORT_SYMBOL_GPL(md_run);
6084
6085 int do_md_run(struct mddev *mddev)
6086 {
6087         int err;
6088
6089         set_bit(MD_NOT_READY, &mddev->flags);
6090         err = md_run(mddev);
6091         if (err)
6092                 goto out;
6093         err = md_bitmap_load(mddev);
6094         if (err) {
6095                 md_bitmap_destroy(mddev);
6096                 goto out;
6097         }
6098
6099         if (mddev_is_clustered(mddev))
6100                 md_allow_write(mddev);
6101
6102         /* run start up tasks that require md_thread */
6103         md_start(mddev);
6104
6105         md_wakeup_thread(mddev->thread);
6106         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
6107
6108         set_capacity(mddev->gendisk, mddev->array_sectors);
6109         revalidate_disk_size(mddev->gendisk, true);
6110         clear_bit(MD_NOT_READY, &mddev->flags);
6111         mddev->changed = 1;
6112         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
6113         sysfs_notify_dirent_safe(mddev->sysfs_state);
6114         sysfs_notify_dirent_safe(mddev->sysfs_action);
6115         sysfs_notify_dirent_safe(mddev->sysfs_degraded);
6116 out:
6117         clear_bit(MD_NOT_READY, &mddev->flags);
6118         return err;
6119 }
6120
6121 int md_start(struct mddev *mddev)
6122 {
6123         int ret = 0;
6124
6125         if (mddev->pers->start) {
6126                 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6127                 md_wakeup_thread(mddev->thread);
6128                 ret = mddev->pers->start(mddev);
6129                 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6130                 md_wakeup_thread(mddev->sync_thread);
6131         }
6132         return ret;
6133 }
6134 EXPORT_SYMBOL_GPL(md_start);
6135
6136 static int restart_array(struct mddev *mddev)
6137 {
6138         struct gendisk *disk = mddev->gendisk;
6139         struct md_rdev *rdev;
6140         bool has_journal = false;
6141         bool has_readonly = false;
6142
6143         /* Complain if it has no devices */
6144         if (list_empty(&mddev->disks))
6145                 return -ENXIO;
6146         if (!mddev->pers)
6147                 return -EINVAL;
6148         if (!mddev->ro)
6149                 return -EBUSY;
6150
6151         rcu_read_lock();
6152         rdev_for_each_rcu(rdev, mddev) {
6153                 if (test_bit(Journal, &rdev->flags) &&
6154                     !test_bit(Faulty, &rdev->flags))
6155                         has_journal = true;
6156                 if (bdev_read_only(rdev->bdev))
6157                         has_readonly = true;
6158         }
6159         rcu_read_unlock();
6160         if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
6161                 /* Don't restart rw with journal missing/faulty */
6162                 return -EINVAL;
6163         if (has_readonly)
6164                 return -EROFS;
6165
6166         mddev->safemode = 0;
6167         mddev->ro = 0;
6168         set_disk_ro(disk, 0);
6169         pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
6170         /* Kick recovery or resync if necessary */
6171         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6172         md_wakeup_thread(mddev->thread);
6173         md_wakeup_thread(mddev->sync_thread);
6174         sysfs_notify_dirent_safe(mddev->sysfs_state);
6175         return 0;
6176 }
6177
6178 static void md_clean(struct mddev *mddev)
6179 {
6180         mddev->array_sectors = 0;
6181         mddev->external_size = 0;
6182         mddev->dev_sectors = 0;
6183         mddev->raid_disks = 0;
6184         mddev->recovery_cp = 0;
6185         mddev->resync_min = 0;
6186         mddev->resync_max = MaxSector;
6187         mddev->reshape_position = MaxSector;
6188         mddev->external = 0;
6189         mddev->persistent = 0;
6190         mddev->level = LEVEL_NONE;
6191         mddev->clevel[0] = 0;
6192         mddev->flags = 0;
6193         mddev->sb_flags = 0;
6194         mddev->ro = 0;
6195         mddev->metadata_type[0] = 0;
6196         mddev->chunk_sectors = 0;
6197         mddev->ctime = mddev->utime = 0;
6198         mddev->layout = 0;
6199         mddev->max_disks = 0;
6200         mddev->events = 0;
6201         mddev->can_decrease_events = 0;
6202         mddev->delta_disks = 0;
6203         mddev->reshape_backwards = 0;
6204         mddev->new_level = LEVEL_NONE;
6205         mddev->new_layout = 0;
6206         mddev->new_chunk_sectors = 0;
6207         mddev->curr_resync = 0;
6208         atomic64_set(&mddev->resync_mismatches, 0);
6209         mddev->suspend_lo = mddev->suspend_hi = 0;
6210         mddev->sync_speed_min = mddev->sync_speed_max = 0;
6211         mddev->recovery = 0;
6212         mddev->in_sync = 0;
6213         mddev->changed = 0;
6214         mddev->degraded = 0;
6215         mddev->safemode = 0;
6216         mddev->private = NULL;
6217         mddev->cluster_info = NULL;
6218         mddev->bitmap_info.offset = 0;
6219         mddev->bitmap_info.default_offset = 0;
6220         mddev->bitmap_info.default_space = 0;
6221         mddev->bitmap_info.chunksize = 0;
6222         mddev->bitmap_info.daemon_sleep = 0;
6223         mddev->bitmap_info.max_write_behind = 0;
6224         mddev->bitmap_info.nodes = 0;
6225 }
6226
6227 static void __md_stop_writes(struct mddev *mddev)
6228 {
6229         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6230         if (work_pending(&mddev->del_work))
6231                 flush_workqueue(md_misc_wq);
6232         if (mddev->sync_thread) {
6233                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6234                 md_reap_sync_thread(mddev);
6235         }
6236
6237         del_timer_sync(&mddev->safemode_timer);
6238
6239         if (mddev->pers && mddev->pers->quiesce) {
6240                 mddev->pers->quiesce(mddev, 1);
6241                 mddev->pers->quiesce(mddev, 0);
6242         }
6243         md_bitmap_flush(mddev);
6244
6245         if (mddev->ro == 0 &&
6246             ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
6247              mddev->sb_flags)) {
6248                 /* mark array as shutdown cleanly */
6249                 if (!mddev_is_clustered(mddev))
6250                         mddev->in_sync = 1;
6251                 md_update_sb(mddev, 1);
6252         }
6253         /* disable policy to guarantee rdevs free resources for serialization */
6254         mddev->serialize_policy = 0;
6255         mddev_destroy_serial_pool(mddev, NULL, true);
6256 }
6257
6258 void md_stop_writes(struct mddev *mddev)
6259 {
6260         mddev_lock_nointr(mddev);
6261         __md_stop_writes(mddev);
6262         mddev_unlock(mddev);
6263 }
6264 EXPORT_SYMBOL_GPL(md_stop_writes);
6265
6266 static void mddev_detach(struct mddev *mddev)
6267 {
6268         md_bitmap_wait_behind_writes(mddev);
6269         if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
6270                 mddev->pers->quiesce(mddev, 1);
6271                 mddev->pers->quiesce(mddev, 0);
6272         }
6273         md_unregister_thread(&mddev->thread);
6274         if (mddev->queue)
6275                 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
6276 }
6277
6278 static void __md_stop(struct mddev *mddev)
6279 {
6280         struct md_personality *pers = mddev->pers;
6281         md_bitmap_destroy(mddev);
6282         mddev_detach(mddev);
6283         /* Ensure ->event_work is done */
6284         if (mddev->event_work.func)
6285                 flush_workqueue(md_misc_wq);
6286         spin_lock(&mddev->lock);
6287         mddev->pers = NULL;
6288         spin_unlock(&mddev->lock);
6289         pers->free(mddev, mddev->private);
6290         mddev->private = NULL;
6291         if (pers->sync_request && mddev->to_remove == NULL)
6292                 mddev->to_remove = &md_redundancy_group;
6293         module_put(pers->owner);
6294         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6295 }
6296
6297 void md_stop(struct mddev *mddev)
6298 {
6299         /* stop the array and free any attached data structures.
6300          * This is called from dm-raid
6301          */
6302         __md_stop_writes(mddev);
6303         __md_stop(mddev);
6304         bioset_exit(&mddev->bio_set);
6305         bioset_exit(&mddev->sync_set);
6306 }
6307
6308 EXPORT_SYMBOL_GPL(md_stop);
6309
6310 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
6311 {
6312         int err = 0;
6313         int did_freeze = 0;
6314
6315         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6316                 did_freeze = 1;
6317                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6318                 md_wakeup_thread(mddev->thread);
6319         }
6320         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6321                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6322         if (mddev->sync_thread)
6323                 /* Thread might be blocked waiting for metadata update
6324                  * which will now never happen */
6325                 wake_up_process(mddev->sync_thread->tsk);
6326
6327         if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
6328                 return -EBUSY;
6329         mddev_unlock(mddev);
6330         wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
6331                                           &mddev->recovery));
6332         wait_event(mddev->sb_wait,
6333                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
6334         mddev_lock_nointr(mddev);
6335
6336         mutex_lock(&mddev->open_mutex);
6337         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
6338             mddev->sync_thread ||
6339             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6340                 pr_warn("md: %s still in use.\n",mdname(mddev));
6341                 if (did_freeze) {
6342                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6343                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6344                         md_wakeup_thread(mddev->thread);
6345                 }
6346                 err = -EBUSY;
6347                 goto out;
6348         }
6349         if (mddev->pers) {
6350                 __md_stop_writes(mddev);
6351
6352                 err  = -ENXIO;
6353                 if (mddev->ro==1)
6354                         goto out;
6355                 mddev->ro = 1;
6356                 set_disk_ro(mddev->gendisk, 1);
6357                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6358                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6359                 md_wakeup_thread(mddev->thread);
6360                 sysfs_notify_dirent_safe(mddev->sysfs_state);
6361                 err = 0;
6362         }
6363 out:
6364         mutex_unlock(&mddev->open_mutex);
6365         return err;
6366 }
6367
6368 /* mode:
6369  *   0 - completely stop and disassemble array
6370  *   2 - stop but do not disassemble array
6371  */
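/*
 * With mode 2 the member rdevs stay bound and the array state is preserved,
 * so the now-inactive array can later be restarted; mode 0 additionally
 * exports every rdev and clears the mddev state via md_clean().
 */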
6372 static int do_md_stop(struct mddev *mddev, int mode,
6373                       struct block_device *bdev)
6374 {
6375         struct gendisk *disk = mddev->gendisk;
6376         struct md_rdev *rdev;
6377         int did_freeze = 0;
6378
6379         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6380                 did_freeze = 1;
6381                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6382                 md_wakeup_thread(mddev->thread);
6383         }
6384         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6385                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6386         if (mddev->sync_thread)
6387                 /* Thread might be blocked waiting for metadata update
6388                  * which will now never happen */
6389                 wake_up_process(mddev->sync_thread->tsk);
6390
6391         mddev_unlock(mddev);
6392         wait_event(resync_wait, (mddev->sync_thread == NULL &&
6393                                  !test_bit(MD_RECOVERY_RUNNING,
6394                                            &mddev->recovery)));
6395         mddev_lock_nointr(mddev);
6396
6397         mutex_lock(&mddev->open_mutex);
6398         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
6399             mddev->sysfs_active ||
6400             mddev->sync_thread ||
6401             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6402                 pr_warn("md: %s still in use.\n",mdname(mddev));
6403                 mutex_unlock(&mddev->open_mutex);
6404                 if (did_freeze) {
6405                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6406                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6407                         md_wakeup_thread(mddev->thread);
6408                 }
6409                 return -EBUSY;
6410         }
6411         if (mddev->pers) {
6412                 if (mddev->ro)
6413                         set_disk_ro(disk, 0);
6414
6415                 __md_stop_writes(mddev);
6416                 __md_stop(mddev);
6417
6418                 /* tell userspace to handle 'inactive' */
6419                 sysfs_notify_dirent_safe(mddev->sysfs_state);
6420
6421                 rdev_for_each(rdev, mddev)
6422                         if (rdev->raid_disk >= 0)
6423                                 sysfs_unlink_rdev(mddev, rdev);
6424
6425                 set_capacity(disk, 0);
6426                 mutex_unlock(&mddev->open_mutex);
6427                 mddev->changed = 1;
6428                 revalidate_disk_size(disk, true);
6429
6430                 if (mddev->ro)
6431                         mddev->ro = 0;
6432         } else
6433                 mutex_unlock(&mddev->open_mutex);
6434         /*
6435          * Free resources if final stop
6436          */
6437         if (mode == 0) {
6438                 pr_info("md: %s stopped.\n", mdname(mddev));
6439
6440                 if (mddev->bitmap_info.file) {
6441                         struct file *f = mddev->bitmap_info.file;
6442                         spin_lock(&mddev->lock);
6443                         mddev->bitmap_info.file = NULL;
6444                         spin_unlock(&mddev->lock);
6445                         fput(f);
6446                 }
6447                 mddev->bitmap_info.offset = 0;
6448
6449                 export_array(mddev);
6450
6451                 md_clean(mddev);
6452                 if (mddev->hold_active == UNTIL_STOP)
6453                         mddev->hold_active = 0;
6454         }
6455         md_new_event(mddev);
6456         sysfs_notify_dirent_safe(mddev->sysfs_state);
6457         return 0;
6458 }
6459
6460 #ifndef MODULE
6461 static void autorun_array(struct mddev *mddev)
6462 {
6463         struct md_rdev *rdev;
6464         int err;
6465
6466         if (list_empty(&mddev->disks))
6467                 return;
6468
6469         pr_info("md: running: ");
6470
6471         rdev_for_each(rdev, mddev) {
6472                 char b[BDEVNAME_SIZE];
6473                 pr_cont("<%s>", bdevname(rdev->bdev,b));
6474         }
6475         pr_cont("\n");
6476
6477         err = do_md_run(mddev);
6478         if (err) {
6479                 pr_warn("md: do_md_run() returned %d\n", err);
6480                 do_md_stop(mddev, 0, NULL);
6481         }
6482 }
6483
6484 /*
6485  * let's try to run arrays based on all disks that have arrived
6486  * until now. (those are in pending_raid_disks)
6487  *
6488  * the method: pick the first pending disk, collect all disks with
6489  * the same UUID, remove all from the pending list and put them into
6490  * the 'same_array' list. Then order this list based on superblock
6491  * update time (freshest comes first), kick out 'old' disks and
6492  * compare superblocks. If everything's fine then run it.
6493  *
6494  * If "unit" is allocated, then bump its reference count
6495  */
6496 static void autorun_devices(int part)
6497 {
6498         struct md_rdev *rdev0, *rdev, *tmp;
6499         struct mddev *mddev;
6500         char b[BDEVNAME_SIZE];
6501
6502         pr_info("md: autorun ...\n");
6503         while (!list_empty(&pending_raid_disks)) {
6504                 int unit;
6505                 dev_t dev;
6506                 LIST_HEAD(candidates);
6507                 rdev0 = list_entry(pending_raid_disks.next,
6508                                          struct md_rdev, same_set);
6509
6510                 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
6511                 INIT_LIST_HEAD(&candidates);
6512                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
6513                         if (super_90_load(rdev, rdev0, 0) >= 0) {
6514                                 pr_debug("md:  adding %s ...\n",
6515                                          bdevname(rdev->bdev,b));
6516                                 list_move(&rdev->same_set, &candidates);
6517                         }
6518                 /*
6519                  * now we have a set of devices, with all of them having
6520                  * mostly sane superblocks. It's time to allocate the
6521                  * mddev.
6522                  */
6523                 if (part) {
6524                         dev = MKDEV(mdp_major,
6525                                     rdev0->preferred_minor << MdpMinorShift);
6526                         unit = MINOR(dev) >> MdpMinorShift;
6527                 } else {
6528                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
6529                         unit = MINOR(dev);
6530                 }
6531                 if (rdev0->preferred_minor != unit) {
6532                         pr_warn("md: unit number in %s is bad: %d\n",
6533                                 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
6534                         break;
6535                 }
6536
6537                 md_probe(dev, NULL, NULL);
6538                 mddev = mddev_find(dev);
6539                 if (!mddev)
6540                         break;
6541
6542                 if (mddev_lock(mddev))
6543                         pr_warn("md: %s locked, cannot run\n", mdname(mddev));
6544                 else if (mddev->raid_disks || mddev->major_version
6545                          || !list_empty(&mddev->disks)) {
6546                         pr_warn("md: %s already running, cannot run %s\n",
6547                                 mdname(mddev), bdevname(rdev0->bdev,b));
6548                         mddev_unlock(mddev);
6549                 } else {
6550                         pr_debug("md: created %s\n", mdname(mddev));
6551                         mddev->persistent = 1;
6552                         rdev_for_each_list(rdev, tmp, &candidates) {
6553                                 list_del_init(&rdev->same_set);
6554                                 if (bind_rdev_to_array(rdev, mddev))
6555                                         export_rdev(rdev);
6556                         }
6557                         autorun_array(mddev);
6558                         mddev_unlock(mddev);
6559                 }
6560                 /* on success, candidates will be empty, on error
6561                  * it won't...
6562                  */
6563                 rdev_for_each_list(rdev, tmp, &candidates) {
6564                         list_del_init(&rdev->same_set);
6565                         export_rdev(rdev);
6566                 }
6567                 mddev_put(mddev);
6568         }
6569         pr_info("md: ... autorun DONE.\n");
6570 }
6571 #endif /* !MODULE */
6572
6573 static int get_version(void __user *arg)
6574 {
6575         mdu_version_t ver;
6576
6577         ver.major = MD_MAJOR_VERSION;
6578         ver.minor = MD_MINOR_VERSION;
6579         ver.patchlevel = MD_PATCHLEVEL_VERSION;
6580
6581         if (copy_to_user(arg, &ver, sizeof(ver)))
6582                 return -EFAULT;
6583
6584         return 0;
6585 }
6586
6587 static int get_array_info(struct mddev *mddev, void __user *arg)
6588 {
6589         mdu_array_info_t info;
6590         int nr, working, insync, failed, spare;
6591         struct md_rdev *rdev;
6592
6593         nr = working = insync = failed = spare = 0;
6594         rcu_read_lock();
6595         rdev_for_each_rcu(rdev, mddev) {
6596                 nr++;
6597                 if (test_bit(Faulty, &rdev->flags))
6598                         failed++;
6599                 else {
6600                         working++;
6601                         if (test_bit(In_sync, &rdev->flags))
6602                                 insync++;
6603                         else if (test_bit(Journal, &rdev->flags))
6604                                 /* TODO: add journal count to md_u.h */
6605                                 ;
6606                         else
6607                                 spare++;
6608                 }
6609         }
6610         rcu_read_unlock();
6611
6612         info.major_version = mddev->major_version;
6613         info.minor_version = mddev->minor_version;
6614         info.patch_version = MD_PATCHLEVEL_VERSION;
6615         info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
6616         info.level         = mddev->level;
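        /* dev_sectors is in 512-byte sectors; mdu_array_info_t.size is in KiB */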
6617         info.size          = mddev->dev_sectors / 2;
6618         if (info.size != mddev->dev_sectors / 2) /* overflow */
6619                 info.size = -1;
6620         info.nr_disks      = nr;
6621         info.raid_disks    = mddev->raid_disks;
6622         info.md_minor      = mddev->md_minor;
6623         info.not_persistent = !mddev->persistent;
6624
6625         info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
6626         info.state         = 0;
6627         if (mddev->in_sync)
6628                 info.state = (1<<MD_SB_CLEAN);
6629         if (mddev->bitmap && mddev->bitmap_info.offset)
6630                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
6631         if (mddev_is_clustered(mddev))
6632                 info.state |= (1<<MD_SB_CLUSTERED);
6633         info.active_disks  = insync;
6634         info.working_disks = working;
6635         info.failed_disks  = failed;
6636         info.spare_disks   = spare;
6637
6638         info.layout        = mddev->layout;
6639         info.chunk_size    = mddev->chunk_sectors << 9;
6640
6641         if (copy_to_user(arg, &info, sizeof(info)))
6642                 return -EFAULT;
6643
6644         return 0;
6645 }
6646
6647 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
6648 {
6649         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
6650         char *ptr;
6651         int err;
6652
6653         file = kzalloc(sizeof(*file), GFP_NOIO);
6654         if (!file)
6655                 return -ENOMEM;
6656
6657         err = 0;
6658         spin_lock(&mddev->lock);
6659         /* bitmap enabled */
6660         if (mddev->bitmap_info.file) {
6661                 ptr = file_path(mddev->bitmap_info.file, file->pathname,
6662                                 sizeof(file->pathname));
6663                 if (IS_ERR(ptr))
6664                         err = PTR_ERR(ptr);
6665                 else
6666                         memmove(file->pathname, ptr,
6667                                 sizeof(file->pathname)-(ptr-file->pathname));
6668         }
6669         spin_unlock(&mddev->lock);
6670
6671         if (err == 0 &&
6672             copy_to_user(arg, file, sizeof(*file)))
6673                 err = -EFAULT;
6674
6675         kfree(file);
6676         return err;
6677 }
6678
6679 static int get_disk_info(struct mddev *mddev, void __user * arg)
6680 {
6681         mdu_disk_info_t info;
6682         struct md_rdev *rdev;
6683
6684         if (copy_from_user(&info, arg, sizeof(info)))
6685                 return -EFAULT;
6686
6687         rcu_read_lock();
6688         rdev = md_find_rdev_nr_rcu(mddev, info.number);
6689         if (rdev) {
6690                 info.major = MAJOR(rdev->bdev->bd_dev);
6691                 info.minor = MINOR(rdev->bdev->bd_dev);
6692                 info.raid_disk = rdev->raid_disk;
6693                 info.state = 0;
6694                 if (test_bit(Faulty, &rdev->flags))
6695                         info.state |= (1<<MD_DISK_FAULTY);
6696                 else if (test_bit(In_sync, &rdev->flags)) {
6697                         info.state |= (1<<MD_DISK_ACTIVE);
6698                         info.state |= (1<<MD_DISK_SYNC);
6699                 }
6700                 if (test_bit(Journal, &rdev->flags))
6701                         info.state |= (1<<MD_DISK_JOURNAL);
6702                 if (test_bit(WriteMostly, &rdev->flags))
6703                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
6704                 if (test_bit(FailFast, &rdev->flags))
6705                         info.state |= (1<<MD_DISK_FAILFAST);
6706         } else {
6707                 info.major = info.minor = 0;
6708                 info.raid_disk = -1;
6709                 info.state = (1<<MD_DISK_REMOVED);
6710         }
6711         rcu_read_unlock();
6712
6713         if (copy_to_user(arg, &info, sizeof(info)))
6714                 return -EFAULT;
6715
6716         return 0;
6717 }
6718
6719 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
6720 {
6721         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
6722         struct md_rdev *rdev;
6723         dev_t dev = MKDEV(info->major,info->minor);
6724
6725         if (mddev_is_clustered(mddev) &&
6726                 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
6727                 pr_warn("%s: Cannot add to clustered mddev.\n",
6728                         mdname(mddev));
6729                 return -EINVAL;
6730         }
6731
6732         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6733                 return -EOVERFLOW;
6734
6735         if (!mddev->raid_disks) {
6736                 int err;
6737                 /* expecting a device which has a superblock */
6738                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6739                 if (IS_ERR(rdev)) {
6740                         pr_warn("md: md_import_device returned %ld\n",
6741                                 PTR_ERR(rdev));
6742                         return PTR_ERR(rdev);
6743                 }
6744                 if (!list_empty(&mddev->disks)) {
6745                         struct md_rdev *rdev0
6746                                 = list_entry(mddev->disks.next,
6747                                              struct md_rdev, same_set);
6748                         err = super_types[mddev->major_version]
6749                                 .load_super(rdev, rdev0, mddev->minor_version);
6750                         if (err < 0) {
6751                                 pr_warn("md: %s has different UUID to %s\n",
6752                                         bdevname(rdev->bdev,b),
6753                                         bdevname(rdev0->bdev,b2));
6754                                 export_rdev(rdev);
6755                                 return -EINVAL;
6756                         }
6757                 }
6758                 err = bind_rdev_to_array(rdev, mddev);
6759                 if (err)
6760                         export_rdev(rdev);
6761                 return err;
6762         }
6763
6764         /*
6765          * md_add_new_disk can be used once the array is assembled
6766          * to add "hot spares".  They must already have a superblock
6767          * written
6768          */
6769         if (mddev->pers) {
6770                 int err;
6771                 if (!mddev->pers->hot_add_disk) {
6772                         pr_warn("%s: personality does not support diskops!\n",
6773                                 mdname(mddev));
6774                         return -EINVAL;
6775                 }
6776                 if (mddev->persistent)
6777                         rdev = md_import_device(dev, mddev->major_version,
6778                                                 mddev->minor_version);
6779                 else
6780                         rdev = md_import_device(dev, -1, -1);
6781                 if (IS_ERR(rdev)) {
6782                         pr_warn("md: md_import_device returned %ld\n",
6783                                 PTR_ERR(rdev));
6784                         return PTR_ERR(rdev);
6785                 }
6786                 /* set saved_raid_disk if appropriate */
6787                 if (!mddev->persistent) {
6788                         if (info->state & (1<<MD_DISK_SYNC)  &&
6789                             info->raid_disk < mddev->raid_disks) {
6790                                 rdev->raid_disk = info->raid_disk;
6791                                 set_bit(In_sync, &rdev->flags);
6792                                 clear_bit(Bitmap_sync, &rdev->flags);
6793                         } else
6794                                 rdev->raid_disk = -1;
6795                         rdev->saved_raid_disk = rdev->raid_disk;
6796                 } else
6797                         super_types[mddev->major_version].
6798                                 validate_super(mddev, rdev);
6799                 if ((info->state & (1<<MD_DISK_SYNC)) &&
6800                      rdev->raid_disk != info->raid_disk) {
6801                         /* This was a hot-add request, but events doesn't
6802                          * match, so reject it.
6803                          */
6804                         export_rdev(rdev);
6805                         return -EINVAL;
6806                 }
6807
6808                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6809                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6810                         set_bit(WriteMostly, &rdev->flags);
6811                 else
6812                         clear_bit(WriteMostly, &rdev->flags);
6813                 if (info->state & (1<<MD_DISK_FAILFAST))
6814                         set_bit(FailFast, &rdev->flags);
6815                 else
6816                         clear_bit(FailFast, &rdev->flags);
6817
6818                 if (info->state & (1<<MD_DISK_JOURNAL)) {
6819                         struct md_rdev *rdev2;
6820                         bool has_journal = false;
6821
6822                         /* make sure no existing journal disk */
6823                         rdev_for_each(rdev2, mddev) {
6824                                 if (test_bit(Journal, &rdev2->flags)) {
6825                                         has_journal = true;
6826                                         break;
6827                                 }
6828                         }
6829                         if (has_journal || mddev->bitmap) {
6830                                 export_rdev(rdev);
6831                                 return -EBUSY;
6832                         }
6833                         set_bit(Journal, &rdev->flags);
6834                 }
6835                 /*
6836                  * check whether the device shows up in other nodes
6837                  */
6838                 if (mddev_is_clustered(mddev)) {
6839                         if (info->state & (1 << MD_DISK_CANDIDATE))
6840                                 set_bit(Candidate, &rdev->flags);
6841                         else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6842                                 /* --add initiated by this node */
6843                                 err = md_cluster_ops->add_new_disk(mddev, rdev);
6844                                 if (err) {
6845                                         export_rdev(rdev);
6846                                         return err;
6847                                 }
6848                         }
6849                 }
6850
6851                 rdev->raid_disk = -1;
6852                 err = bind_rdev_to_array(rdev, mddev);
6853
6854                 if (err)
6855                         export_rdev(rdev);
6856
6857                 if (mddev_is_clustered(mddev)) {
6858                         if (info->state & (1 << MD_DISK_CANDIDATE)) {
6859                                 if (!err) {
6860                                         err = md_cluster_ops->new_disk_ack(mddev,
6861                                                 err == 0);
6862                                         if (err)
6863                                                 md_kick_rdev_from_array(rdev);
6864                                 }
6865                         } else {
6866                                 if (err)
6867                                         md_cluster_ops->add_new_disk_cancel(mddev);
6868                                 else
6869                                         err = add_bound_rdev(rdev);
6870                         }
6871
6872                 } else if (!err)
6873                         err = add_bound_rdev(rdev);
6874
6875                 return err;
6876         }
6877
6878         /* otherwise, md_add_new_disk is only allowed
6879          * for major_version==0 superblocks
6880          */
6881         if (mddev->major_version != 0) {
6882                 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
6883                 return -EINVAL;
6884         }
6885
6886         if (!(info->state & (1<<MD_DISK_FAULTY))) {
6887                 int err;
6888                 rdev = md_import_device(dev, -1, 0);
6889                 if (IS_ERR(rdev)) {
6890                         pr_warn("md: error, md_import_device() returned %ld\n",
6891                                 PTR_ERR(rdev));
6892                         return PTR_ERR(rdev);
6893                 }
6894                 rdev->desc_nr = info->number;
6895                 if (info->raid_disk < mddev->raid_disks)
6896                         rdev->raid_disk = info->raid_disk;
6897                 else
6898                         rdev->raid_disk = -1;
6899
6900                 if (rdev->raid_disk < mddev->raid_disks)
6901                         if (info->state & (1<<MD_DISK_SYNC))
6902                                 set_bit(In_sync, &rdev->flags);
6903
6904                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6905                         set_bit(WriteMostly, &rdev->flags);
6906                 if (info->state & (1<<MD_DISK_FAILFAST))
6907                         set_bit(FailFast, &rdev->flags);
6908
6909                 if (!mddev->persistent) {
6910                         pr_debug("md: nonpersistent superblock ...\n");
6911                         rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6912                 } else
6913                         rdev->sb_start = calc_dev_sboffset(rdev);
6914                 rdev->sectors = rdev->sb_start;
6915
6916                 err = bind_rdev_to_array(rdev, mddev);
6917                 if (err) {
6918                         export_rdev(rdev);
6919                         return err;
6920                 }
6921         }
6922
6923         return 0;
6924 }
6925
6926 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6927 {
6928         char b[BDEVNAME_SIZE];
6929         struct md_rdev *rdev;
6930
6931         if (!mddev->pers)
6932                 return -ENODEV;
6933
6934         rdev = find_rdev(mddev, dev);
6935         if (!rdev)
6936                 return -ENXIO;
6937
6938         if (rdev->raid_disk < 0)
6939                 goto kick_rdev;
6940
6941         clear_bit(Blocked, &rdev->flags);
6942         remove_and_add_spares(mddev, rdev);
6943
6944         if (rdev->raid_disk >= 0)
6945                 goto busy;
6946
6947 kick_rdev:
6948         if (mddev_is_clustered(mddev)) {
6949                 if (md_cluster_ops->remove_disk(mddev, rdev))
6950                         goto busy;
6951         }
6952
6953         md_kick_rdev_from_array(rdev);
6954         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6955         if (mddev->thread)
6956                 md_wakeup_thread(mddev->thread);
6957         else
6958                 md_update_sb(mddev, 1);
6959         md_new_event(mddev);
6960
6961         return 0;
6962 busy:
6963         pr_debug("md: cannot remove active disk %s from %s ...\n",
6964                  bdevname(rdev->bdev,b), mdname(mddev));
6965         return -EBUSY;
6966 }
6967
6968 static int hot_add_disk(struct mddev *mddev, dev_t dev)
6969 {
6970         char b[BDEVNAME_SIZE];
6971         int err;
6972         struct md_rdev *rdev;
6973
6974         if (!mddev->pers)
6975                 return -ENODEV;
6976
6977         if (mddev->major_version != 0) {
6978                 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
6979                         mdname(mddev));
6980                 return -EINVAL;
6981         }
6982         if (!mddev->pers->hot_add_disk) {
6983                 pr_warn("%s: personality does not support diskops!\n",
6984                         mdname(mddev));
6985                 return -EINVAL;
6986         }
6987
6988         rdev = md_import_device(dev, -1, 0);
6989         if (IS_ERR(rdev)) {
6990                 pr_warn("md: error, md_import_device() returned %ld\n",
6991                         PTR_ERR(rdev));
6992                 return -EINVAL;
6993         }
6994
6995         if (mddev->persistent)
6996                 rdev->sb_start = calc_dev_sboffset(rdev);
6997         else
6998                 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6999
7000         rdev->sectors = rdev->sb_start;
7001
7002         if (test_bit(Faulty, &rdev->flags)) {
7003                 pr_warn("md: can not hot-add faulty %s disk to %s!\n",
7004                         bdevname(rdev->bdev,b), mdname(mddev));
7005                 err = -EINVAL;
7006                 goto abort_export;
7007         }
7008
7009         clear_bit(In_sync, &rdev->flags);
7010         rdev->desc_nr = -1;
7011         rdev->saved_raid_disk = -1;
7012         err = bind_rdev_to_array(rdev, mddev);
7013         if (err)
7014                 goto abort_export;
7015
7016         /*
7017          * The rest should ideally be atomic: we can have disk failures
7018          * noticed in interrupt contexts ...
7019          */
7020
7021         rdev->raid_disk = -1;
7022
7023         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7024         if (!mddev->thread)
7025                 md_update_sb(mddev, 1);
7026         /*
7027          * Kick recovery, maybe this spare has to be added to the
7028          * array immediately.
7029          */
7030         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7031         md_wakeup_thread(mddev->thread);
7032         md_new_event(mddev);
7033         return 0;
7034
7035 abort_export:
7036         export_rdev(rdev);
7037         return err;
7038 }
7039
7040 static int set_bitmap_file(struct mddev *mddev, int fd)
7041 {
7042         int err = 0;
7043
7044         if (mddev->pers) {
7045                 if (!mddev->pers->quiesce || !mddev->thread)
7046                         return -EBUSY;
7047                 if (mddev->recovery || mddev->sync_thread)
7048                         return -EBUSY;
7049                 /* we should be able to change the bitmap.. */
7050         }
7051
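             /* A non-negative fd attaches a new bitmap file; a negative fd removes the bitmap. */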
7052         if (fd >= 0) {
7053                 struct inode *inode;
7054                 struct file *f;
7055
7056                 if (mddev->bitmap || mddev->bitmap_info.file)
7057                         return -EEXIST; /* cannot add when bitmap is present */
7058                 f = fget(fd);
7059
7060                 if (f == NULL) {
7061                         pr_warn("%s: error: failed to get bitmap file\n",
7062                                 mdname(mddev));
7063                         return -EBADF;
7064                 }
7065
7066                 inode = f->f_mapping->host;
7067                 if (!S_ISREG(inode->i_mode)) {
7068                         pr_warn("%s: error: bitmap file must be a regular file\n",
7069                                 mdname(mddev));
7070                         err = -EBADF;
7071                 } else if (!(f->f_mode & FMODE_WRITE)) {
7072                 pr_warn("%s: error: bitmap file must be opened for write\n",
7073                                 mdname(mddev));
7074                         err = -EBADF;
7075                 } else if (atomic_read(&inode->i_writecount) != 1) {
7076                         pr_warn("%s: error: bitmap file is already in use\n",
7077                                 mdname(mddev));
7078                         err = -EBUSY;
7079                 }
7080                 if (err) {
7081                         fput(f);
7082                         return err;
7083                 }
7084                 mddev->bitmap_info.file = f;
7085                 mddev->bitmap_info.offset = 0; /* file overrides offset */
7086         } else if (mddev->bitmap == NULL)
7087                 return -ENOENT; /* cannot remove what isn't there */
7088         err = 0;
7089         if (mddev->pers) {
7090                 if (fd >= 0) {
7091                         struct bitmap *bitmap;
7092
7093                         bitmap = md_bitmap_create(mddev, -1);
7094                         mddev_suspend(mddev);
7095                         if (!IS_ERR(bitmap)) {
7096                                 mddev->bitmap = bitmap;
7097                                 err = md_bitmap_load(mddev);
7098                         } else
7099                                 err = PTR_ERR(bitmap);
7100                         if (err) {
7101                                 md_bitmap_destroy(mddev);
7102                                 fd = -1;
7103                         }
7104                         mddev_resume(mddev);
7105                 } else if (fd < 0) {
7106                         mddev_suspend(mddev);
7107                         md_bitmap_destroy(mddev);
7108                         mddev_resume(mddev);
7109                 }
7110         }
7111         if (fd < 0) {
7112                 struct file *f = mddev->bitmap_info.file;
7113                 if (f) {
7114                         spin_lock(&mddev->lock);
7115                         mddev->bitmap_info.file = NULL;
7116                         spin_unlock(&mddev->lock);
7117                         fput(f);
7118                 }
7119         }
7120
7121         return err;
7122 }
7123
7124 /*
7125  * md_set_array_info is used in two different ways.
7126  * The original usage is when creating a new array.
7127  * In this usage, raid_disks is > 0 and it, together with
7128  *  level, size, not_persistent, layout and chunksize, determines the
7129  *  shape of the array.
7130  *  This will always create an array with a type-0.90.0 superblock.
7131  * The newer usage is when assembling an array.
7132  *  In this case raid_disks will be 0, and the major_version field is
7133  *  used to determine which style of super-block is to be found on the devices.
7134  *  The minor and patch _version numbers are also kept in case the
7135  *  super_block handler wishes to interpret them.
7136  */
7137 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
7138 {
7139         if (info->raid_disks == 0) {
7140                 /* just setting version number for superblock loading */
7141                 if (info->major_version < 0 ||
7142                     info->major_version >= ARRAY_SIZE(super_types) ||
7143                     super_types[info->major_version].name == NULL) {
7144                         /* maybe try to auto-load a module? */
7145                         pr_warn("md: superblock version %d not known\n",
7146                                 info->major_version);
7147                         return -EINVAL;
7148                 }
7149                 mddev->major_version = info->major_version;
7150                 mddev->minor_version = info->minor_version;
7151                 mddev->patch_version = info->patch_version;
7152                 mddev->persistent = !info->not_persistent;
7153                 /* ensure mddev_put doesn't delete this now that there
7154                  * is some minimal configuration.
7155                  */
7156                 mddev->ctime         = ktime_get_real_seconds();
7157                 return 0;
7158         }
7159         mddev->major_version = MD_MAJOR_VERSION;
7160         mddev->minor_version = MD_MINOR_VERSION;
7161         mddev->patch_version = MD_PATCHLEVEL_VERSION;
7162         mddev->ctime         = ktime_get_real_seconds();
7163
7164         mddev->level         = info->level;
7165         mddev->clevel[0]     = 0;
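             /* info->size is in KiB; dev_sectors counts 512-byte sectors. */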
7166         mddev->dev_sectors   = 2 * (sector_t)info->size;
7167         mddev->raid_disks    = info->raid_disks;
7168         /* don't set md_minor, it is determined by which /dev/md* was
7169          * opened
7170          */
7171         if (info->state & (1<<MD_SB_CLEAN))
7172                 mddev->recovery_cp = MaxSector;
7173         else
7174                 mddev->recovery_cp = 0;
7175         mddev->persistent    = ! info->not_persistent;
7176         mddev->external      = 0;
7177
7178         mddev->layout        = info->layout;
7179         if (mddev->level == 0)
7180                 /* Cannot trust RAID0 layout info here */
7181                 mddev->layout = -1;
7182         mddev->chunk_sectors = info->chunk_size >> 9;
7183
7184         if (mddev->persistent) {
7185                 mddev->max_disks = MD_SB_DISKS;
7186                 mddev->flags = 0;
7187                 mddev->sb_flags = 0;
7188         }
7189         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7190
7191         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
7192         mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
7193         mddev->bitmap_info.offset = 0;
7194
7195         mddev->reshape_position = MaxSector;
7196
7197         /*
7198          * Generate a 128 bit UUID
7199          */
7200         get_random_bytes(mddev->uuid, 16);
7201
7202         mddev->new_level = mddev->level;
7203         mddev->new_chunk_sectors = mddev->chunk_sectors;
7204         mddev->new_layout = mddev->layout;
7205         mddev->delta_disks = 0;
7206         mddev->reshape_backwards = 0;
7207
7208         return 0;
7209 }
7210
7211 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
7212 {
7213         lockdep_assert_held(&mddev->reconfig_mutex);
7214
7215         if (mddev->external_size)
7216                 return;
7217
7218         mddev->array_sectors = array_sectors;
7219 }
7220 EXPORT_SYMBOL(md_set_array_sectors);
7221
7222 static int update_size(struct mddev *mddev, sector_t num_sectors)
7223 {
7224         struct md_rdev *rdev;
7225         int rv;
7226         int fit = (num_sectors == 0);
7227         sector_t old_dev_sectors = mddev->dev_sectors;
7228
7229         if (mddev->pers->resize == NULL)
7230                 return -EINVAL;
7231         /* The "num_sectors" is the number of sectors of each device that
7232          * is used.  This can only make sense for arrays with redundancy.
7233          * linear and raid0 always use whatever space is available. We can only
7234          * consider changing this number if no resync or reconstruction is
7235          * happening, and if the new size is acceptable. It must fit before the
7236          * sb_start or, if that is <data_offset, it must fit before the size
7237          * of each device.  If num_sectors is zero, we find the largest size
7238          * that fits.
7239          */
7240         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7241             mddev->sync_thread)
7242                 return -EBUSY;
7243         if (mddev->ro)
7244                 return -EROFS;
7245
7246         rdev_for_each(rdev, mddev) {
7247                 sector_t avail = rdev->sectors;
7248
7249                 if (fit && (num_sectors == 0 || num_sectors > avail))
7250                         num_sectors = avail;
7251                 if (avail < num_sectors)
7252                         return -ENOSPC;
7253         }
7254         rv = mddev->pers->resize(mddev, num_sectors);
7255         if (!rv) {
7256                 if (mddev_is_clustered(mddev))
7257                         md_cluster_ops->update_size(mddev, old_dev_sectors);
7258                 else if (mddev->queue) {
7259                         set_capacity(mddev->gendisk, mddev->array_sectors);
7260                         revalidate_disk_size(mddev->gendisk, true);
7261                 }
7262         }
7263         return rv;
7264 }
7265
7266 static int update_raid_disks(struct mddev *mddev, int raid_disks)
7267 {
7268         int rv;
7269         struct md_rdev *rdev;
7270         /* change the number of raid disks */
7271         if (mddev->pers->check_reshape == NULL)
7272                 return -EINVAL;
7273         if (mddev->ro)
7274                 return -EROFS;
7275         if (raid_disks <= 0 ||
7276             (mddev->max_disks && raid_disks >= mddev->max_disks))
7277                 return -EINVAL;
7278         if (mddev->sync_thread ||
7279             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7280             test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
7281             mddev->reshape_position != MaxSector)
7282                 return -EBUSY;
7283
7284         rdev_for_each(rdev, mddev) {
7285                 if (mddev->raid_disks < raid_disks &&
7286                     rdev->data_offset < rdev->new_data_offset)
7287                         return -EINVAL;
7288                 if (mddev->raid_disks > raid_disks &&
7289                     rdev->data_offset > rdev->new_data_offset)
7290                         return -EINVAL;
7291         }
7292
7293         mddev->delta_disks = raid_disks - mddev->raid_disks;
7294         if (mddev->delta_disks < 0)
7295                 mddev->reshape_backwards = 1;
7296         else if (mddev->delta_disks > 0)
7297                 mddev->reshape_backwards = 0;
7298
7299         rv = mddev->pers->check_reshape(mddev);
7300         if (rv < 0) {
7301                 mddev->delta_disks = 0;
7302                 mddev->reshape_backwards = 0;
7303         }
7304         return rv;
7305 }
7306
7307 /*
7308  * update_array_info is used to change the configuration of an
7309  * on-line array.
7310  * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
7311  * fields in the info are checked against the array.
7312  * Any differences that cannot be handled will cause an error.
7313  * Normally, only one change can be managed at a time.
7314  */
7315 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
7316 {
7317         int rv = 0;
7318         int cnt = 0;
7319         int state = 0;
7320
7321         /* calculate expected state, ignoring low bits */
7322         if (mddev->bitmap && mddev->bitmap_info.offset)
7323                 state |= (1 << MD_SB_BITMAP_PRESENT);
7324
7325         if (mddev->major_version != info->major_version ||
7326             mddev->minor_version != info->minor_version ||
7327 /*          mddev->patch_version != info->patch_version || */
7328             mddev->ctime         != info->ctime         ||
7329             mddev->level         != info->level         ||
7330 /*          mddev->layout        != info->layout        || */
7331             mddev->persistent    != !info->not_persistent ||
7332             mddev->chunk_sectors != info->chunk_size >> 9 ||
7333             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
7334             ((state^info->state) & 0xfffffe00)
7335                 )
7336                 return -EINVAL;
7337         /* Check there is only one change */
7338         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7339                 cnt++;
7340         if (mddev->raid_disks != info->raid_disks)
7341                 cnt++;
7342         if (mddev->layout != info->layout)
7343                 cnt++;
7344         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
7345                 cnt++;
7346         if (cnt == 0)
7347                 return 0;
7348         if (cnt > 1)
7349                 return -EINVAL;
7350
7351         if (mddev->layout != info->layout) {
7352                 /* Change layout
7353                  * we don't need to do anything at the md level, the
7354                  * personality will take care of it all.
7355                  */
7356                 if (mddev->pers->check_reshape == NULL)
7357                         return -EINVAL;
7358                 else {
7359                         mddev->new_layout = info->layout;
7360                         rv = mddev->pers->check_reshape(mddev);
7361                         if (rv)
7362                                 mddev->new_layout = mddev->layout;
7363                         return rv;
7364                 }
7365         }
7366         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7367                 rv = update_size(mddev, (sector_t)info->size * 2);
7368
7369         if (mddev->raid_disks    != info->raid_disks)
7370                 rv = update_raid_disks(mddev, info->raid_disks);
7371
7372         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
7373                 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
7374                         rv = -EINVAL;
7375                         goto err;
7376                 }
7377                 if (mddev->recovery || mddev->sync_thread) {
7378                         rv = -EBUSY;
7379                         goto err;
7380                 }
7381                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
7382                         struct bitmap *bitmap;
7383                         /* add the bitmap */
7384                         if (mddev->bitmap) {
7385                                 rv = -EEXIST;
7386                                 goto err;
7387                         }
7388                         if (mddev->bitmap_info.default_offset == 0) {
7389                                 rv = -EINVAL;
7390                                 goto err;
7391                         }
7392                         mddev->bitmap_info.offset =
7393                                 mddev->bitmap_info.default_offset;
7394                         mddev->bitmap_info.space =
7395                                 mddev->bitmap_info.default_space;
7396                         bitmap = md_bitmap_create(mddev, -1);
7397                         mddev_suspend(mddev);
7398                         if (!IS_ERR(bitmap)) {
7399                                 mddev->bitmap = bitmap;
7400                                 rv = md_bitmap_load(mddev);
7401                         } else
7402                                 rv = PTR_ERR(bitmap);
7403                         if (rv)
7404                                 md_bitmap_destroy(mddev);
7405                         mddev_resume(mddev);
7406                 } else {
7407                         /* remove the bitmap */
7408                         if (!mddev->bitmap) {
7409                                 rv = -ENOENT;
7410                                 goto err;
7411                         }
7412                         if (mddev->bitmap->storage.file) {
7413                                 rv = -EINVAL;
7414                                 goto err;
7415                         }
7416                         if (mddev->bitmap_info.nodes) {
7417                                 /* take the PW lock on all the bitmap locks */
7418                                 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
7419                                         pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
7420                                         rv = -EPERM;
7421                                         md_cluster_ops->unlock_all_bitmaps(mddev);
7422                                         goto err;
7423                                 }
7424
7425                                 mddev->bitmap_info.nodes = 0;
7426                                 md_cluster_ops->leave(mddev);
7427                                 module_put(md_cluster_mod);
7428                                 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
7429                         }
7430                         mddev_suspend(mddev);
7431                         md_bitmap_destroy(mddev);
7432                         mddev_resume(mddev);
7433                         mddev->bitmap_info.offset = 0;
7434                 }
7435         }
7436         md_update_sb(mddev, 1);
7437         return rv;
7438 err:
7439         return rv;
7440 }
7441
7442 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
7443 {
7444         struct md_rdev *rdev;
7445         int err = 0;
7446
7447         if (mddev->pers == NULL)
7448                 return -ENODEV;
7449
7450         rcu_read_lock();
7451         rdev = md_find_rdev_rcu(mddev, dev);
7452         if (!rdev)
7453                 err =  -ENODEV;
7454         else {
7455                 md_error(mddev, rdev);
7456                 if (!test_bit(Faulty, &rdev->flags))
7457                         err = -EBUSY;
7458         }
7459         rcu_read_unlock();
7460         return err;
7461 }
7462
7463 /*
7464  * We have a problem here: there is no easy way to give a CHS
7465  * virtual geometry. We currently pretend that we have a 2-head,
7466  * 4-sector geometry (with a BIG number of cylinders...). This drives
7467  * dosfs just mad... ;-)
7468  */
7469 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
7470 {
7471         struct mddev *mddev = bdev->bd_disk->private_data;
7472
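             /* Fake geometry: 2 heads x 4 sectors, so cylinders = array_sectors / 8. */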
7473         geo->heads = 2;
7474         geo->sectors = 4;
7475         geo->cylinders = mddev->array_sectors / 8;
7476         return 0;
7477 }
7478
7479 static inline bool md_ioctl_valid(unsigned int cmd)
7480 {
7481         switch (cmd) {
7482         case ADD_NEW_DISK:
7483         case BLKROSET:
7484         case GET_ARRAY_INFO:
7485         case GET_BITMAP_FILE:
7486         case GET_DISK_INFO:
7487         case HOT_ADD_DISK:
7488         case HOT_REMOVE_DISK:
7489         case RAID_VERSION:
7490         case RESTART_ARRAY_RW:
7491         case RUN_ARRAY:
7492         case SET_ARRAY_INFO:
7493         case SET_BITMAP_FILE:
7494         case SET_DISK_FAULTY:
7495         case STOP_ARRAY:
7496         case STOP_ARRAY_RO:
7497         case CLUSTERED_DISK_NACK:
7498                 return true;
7499         default:
7500                 return false;
7501         }
7502 }
7503
7504 static int md_ioctl(struct block_device *bdev, fmode_t mode,
7505                         unsigned int cmd, unsigned long arg)
7506 {
7507         int err = 0;
7508         void __user *argp = (void __user *)arg;
7509         struct mddev *mddev = NULL;
7510         int ro;
7511         bool did_set_md_closing = false;
7512
7513         if (!md_ioctl_valid(cmd))
7514                 return -ENOTTY;
7515
7516         switch (cmd) {
7517         case RAID_VERSION:
7518         case GET_ARRAY_INFO:
7519         case GET_DISK_INFO:
7520                 break;
7521         default:
7522                 if (!capable(CAP_SYS_ADMIN))
7523                         return -EACCES;
7524         }
7525
7526         /*
7527          * Commands dealing with the RAID driver but not any
7528          * particular array:
7529          */
7530         switch (cmd) {
7531         case RAID_VERSION:
7532                 err = get_version(argp);
7533                 goto out;
7534         default:;
7535         }
7536
7537         /*
7538          * Commands creating/starting a new array:
7539          */
7540
7541         mddev = bdev->bd_disk->private_data;
7542
7543         if (!mddev) {
7544                 BUG();
7545                 goto out;
7546         }
7547
7548         /* Some actions do not require the mutex */
7549         switch (cmd) {
7550         case GET_ARRAY_INFO:
7551                 if (!mddev->raid_disks && !mddev->external)
7552                         err = -ENODEV;
7553                 else
7554                         err = get_array_info(mddev, argp);
7555                 goto out;
7556
7557         case GET_DISK_INFO:
7558                 if (!mddev->raid_disks && !mddev->external)
7559                         err = -ENODEV;
7560                 else
7561                         err = get_disk_info(mddev, argp);
7562                 goto out;
7563
7564         case SET_DISK_FAULTY:
7565                 err = set_disk_faulty(mddev, new_decode_dev(arg));
7566                 goto out;
7567
7568         case GET_BITMAP_FILE:
7569                 err = get_bitmap_file(mddev, argp);
7570                 goto out;
7571
7572         }
7573
7574         if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK)
7575                 flush_rdev_wq(mddev);
7576
7577         if (cmd == HOT_REMOVE_DISK)
7578                 /* need to ensure recovery thread has run */
7579                 wait_event_interruptible_timeout(mddev->sb_wait,
7580                                                  !test_bit(MD_RECOVERY_NEEDED,
7581                                                            &mddev->recovery),
7582                                                  msecs_to_jiffies(5000));
7583         if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7584                 /* Need to flush page cache, and ensure no-one else opens
7585                  * and writes
7586                  */
7587                 mutex_lock(&mddev->open_mutex);
7588                 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
7589                         mutex_unlock(&mddev->open_mutex);
7590                         err = -EBUSY;
7591                         goto out;
7592                 }
7593                 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
7594                         mutex_unlock(&mddev->open_mutex);
7595                         err = -EBUSY;
7596                         goto out;
7597                 }
7598                 did_set_md_closing = true;
7599                 mutex_unlock(&mddev->open_mutex);
7600                 sync_blockdev(bdev);
7601         }
7602         err = mddev_lock(mddev);
7603         if (err) {
7604                 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7605                          err, cmd);
7606                 goto out;
7607         }
7608
7609         if (cmd == SET_ARRAY_INFO) {
7610                 mdu_array_info_t info;
7611                 if (!arg)
7612                         memset(&info, 0, sizeof(info));
7613                 else if (copy_from_user(&info, argp, sizeof(info))) {
7614                         err = -EFAULT;
7615                         goto unlock;
7616                 }
7617                 if (mddev->pers) {
7618                         err = update_array_info(mddev, &info);
7619                         if (err) {
7620                                 pr_warn("md: couldn't update array info. %d\n", err);
7621                                 goto unlock;
7622                         }
7623                         goto unlock;
7624                 }
7625                 if (!list_empty(&mddev->disks)) {
7626                         pr_warn("md: array %s already has disks!\n", mdname(mddev));
7627                         err = -EBUSY;
7628                         goto unlock;
7629                 }
7630                 if (mddev->raid_disks) {
7631                         pr_warn("md: array %s already initialised!\n", mdname(mddev));
7632                         err = -EBUSY;
7633                         goto unlock;
7634                 }
7635                 err = md_set_array_info(mddev, &info);
7636                 if (err) {
7637                         pr_warn("md: couldn't set array info. %d\n", err);
7638                         goto unlock;
7639                 }
7640                 goto unlock;
7641         }
7642
7643         /*
7644          * Commands querying/configuring an existing array:
7645          */
7646         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
7647          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
7648         if ((!mddev->raid_disks && !mddev->external)
7649             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7650             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7651             && cmd != GET_BITMAP_FILE) {
7652                 err = -ENODEV;
7653                 goto unlock;
7654         }
7655
7656         /*
7657          * Commands even a read-only array can execute:
7658          */
7659         switch (cmd) {
7660         case RESTART_ARRAY_RW:
7661                 err = restart_array(mddev);
7662                 goto unlock;
7663
7664         case STOP_ARRAY:
7665                 err = do_md_stop(mddev, 0, bdev);
7666                 goto unlock;
7667
7668         case STOP_ARRAY_RO:
7669                 err = md_set_readonly(mddev, bdev);
7670                 goto unlock;
7671
7672         case HOT_REMOVE_DISK:
7673                 err = hot_remove_disk(mddev, new_decode_dev(arg));
7674                 goto unlock;
7675
7676         case ADD_NEW_DISK:
7677                 /* We can support ADD_NEW_DISK on read-only arrays
7678                  * only if we are re-adding a preexisting device.
7679                  * So require mddev->pers and MD_DISK_SYNC.
7680                  */
7681                 if (mddev->pers) {
7682                         mdu_disk_info_t info;
7683                         if (copy_from_user(&info, argp, sizeof(info)))
7684                                 err = -EFAULT;
7685                         else if (!(info.state & (1<<MD_DISK_SYNC)))
7686                                 /* Need to clear read-only for this */
7687                                 break;
7688                         else
7689                                 err = md_add_new_disk(mddev, &info);
7690                         goto unlock;
7691                 }
7692                 break;
7693
7694         case BLKROSET:
7695                 if (get_user(ro, (int __user *)(arg))) {
7696                         err = -EFAULT;
7697                         goto unlock;
7698                 }
7699                 err = -EINVAL;
7700
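                     /* mddev->ro: 0 = read-write, 1 = read-only, 2 = auto-read-only */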
7701                 /* if the bdev is going readonly, the value of mddev->ro
7702                  * does not matter; no writes are coming
7703                  */
7704                 if (ro)
7705                         goto unlock;
7706
7707                 /* are we already prepared for writes? */
7708                 if (mddev->ro != 1)
7709                         goto unlock;
7710
7711                 /* transitioning to auto-read-only needs to happen only for
7712                  * arrays that call md_write_start
7713                  */
7714                 if (mddev->pers) {
7715                         err = restart_array(mddev);
7716                         if (err == 0) {
7717                                 mddev->ro = 2;
7718                                 set_disk_ro(mddev->gendisk, 0);
7719                         }
7720                 }
7721                 goto unlock;
7722         }
7723
7724         /*
7725          * The remaining ioctls are changing the state of the
7726          * superblock, so we do not allow them on read-only arrays.
7727          */
7728         if (mddev->ro && mddev->pers) {
7729                 if (mddev->ro == 2) {
7730                         mddev->ro = 0;
7731                         sysfs_notify_dirent_safe(mddev->sysfs_state);
7732                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7733                         /* mddev_unlock will wake thread */
7734                         /* If a device failed while we were read-only, we
7735                          * need to make sure the metadata is updated now.
7736                          */
7737                         if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
7738                                 mddev_unlock(mddev);
7739                                 wait_event(mddev->sb_wait,
7740                                            !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7741                                            !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7742                                 mddev_lock_nointr(mddev);
7743                         }
7744                 } else {
7745                         err = -EROFS;
7746                         goto unlock;
7747                 }
7748         }
7749
7750         switch (cmd) {
7751         case ADD_NEW_DISK:
7752         {
7753                 mdu_disk_info_t info;
7754                 if (copy_from_user(&info, argp, sizeof(info)))
7755                         err = -EFAULT;
7756                 else
7757                         err = md_add_new_disk(mddev, &info);
7758                 goto unlock;
7759         }
7760
7761         case CLUSTERED_DISK_NACK:
7762                 if (mddev_is_clustered(mddev))
7763                         md_cluster_ops->new_disk_ack(mddev, false);
7764                 else
7765                         err = -EINVAL;
7766                 goto unlock;
7767
7768         case HOT_ADD_DISK:
7769                 err = hot_add_disk(mddev, new_decode_dev(arg));
7770                 goto unlock;
7771
7772         case RUN_ARRAY:
7773                 err = do_md_run(mddev);
7774                 goto unlock;
7775
7776         case SET_BITMAP_FILE:
7777                 err = set_bitmap_file(mddev, (int)arg);
7778                 goto unlock;
7779
7780         default:
7781                 err = -EINVAL;
7782                 goto unlock;
7783         }
7784
7785 unlock:
7786         if (mddev->hold_active == UNTIL_IOCTL &&
7787             err != -EINVAL)
7788                 mddev->hold_active = 0;
7789         mddev_unlock(mddev);
7790 out:
7791         if (did_set_md_closing)
7792                 clear_bit(MD_CLOSING, &mddev->flags);
7793         return err;
7794 }
7795 #ifdef CONFIG_COMPAT
7796 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7797                     unsigned int cmd, unsigned long arg)
7798 {
7799         switch (cmd) {
7800         case HOT_REMOVE_DISK:
7801         case HOT_ADD_DISK:
7802         case SET_DISK_FAULTY:
7803         case SET_BITMAP_FILE:
7804                 /* These take an integer arg, do not convert */
7805                 break;
7806         default:
7807                 arg = (unsigned long)compat_ptr(arg);
7808                 break;
7809         }
7810
7811         return md_ioctl(bdev, mode, cmd, arg);
7812 }
7813 #endif /* CONFIG_COMPAT */
7814
7815 static int md_open(struct block_device *bdev, fmode_t mode)
7816 {
7817         /*
7818          * Succeed if we can lock the mddev, which confirms that
7819          * it isn't being stopped right now.
7820          */
7821         struct mddev *mddev = mddev_find(bdev->bd_dev);
7822         int err;
7823
7824         if (!mddev)
7825                 return -ENODEV;
7826
7827         if (mddev->gendisk != bdev->bd_disk) {
7828                 /* we are racing with mddev_put which is discarding this
7829                  * bd_disk.
7830                  */
7831                 mddev_put(mddev);
7832                 /* Wait until bdev->bd_disk is definitely gone */
7833                 if (work_pending(&mddev->del_work))
7834                         flush_workqueue(md_misc_wq);
7835                 return -EBUSY;
7836         }
7837         BUG_ON(mddev != bdev->bd_disk->private_data);
7838
7839         if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
7840                 goto out;
7841
7842         if (test_bit(MD_CLOSING, &mddev->flags)) {
7843                 mutex_unlock(&mddev->open_mutex);
7844                 err = -ENODEV;
7845                 goto out;
7846         }
7847
7848         err = 0;
7849         atomic_inc(&mddev->openers);
7850         mutex_unlock(&mddev->open_mutex);
7851
7852         bdev_check_media_change(bdev);
7853  out:
7854         if (err)
7855                 mddev_put(mddev);
7856         return err;
7857 }
7858
7859 static void md_release(struct gendisk *disk, fmode_t mode)
7860 {
7861         struct mddev *mddev = disk->private_data;
7862
7863         BUG_ON(!mddev);
7864         atomic_dec(&mddev->openers);
7865         mddev_put(mddev);
7866 }
7867
7868 static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
7869 {
7870         struct mddev *mddev = disk->private_data;
7871         unsigned int ret = 0;
7872
7873         if (mddev->changed)
7874                 ret = DISK_EVENT_MEDIA_CHANGE;
7875         mddev->changed = 0;
7876         return ret;
7877 }
7878
7879 const struct block_device_operations md_fops =
7880 {
7881         .owner          = THIS_MODULE,
7882         .submit_bio     = md_submit_bio,
7883         .open           = md_open,
7884         .release        = md_release,
7885         .ioctl          = md_ioctl,
7886 #ifdef CONFIG_COMPAT
7887         .compat_ioctl   = md_compat_ioctl,
7888 #endif
7889         .getgeo         = md_getgeo,
7890         .check_events   = md_check_events,
7891 };
7892
7893 static int md_thread(void *arg)
7894 {
7895         struct md_thread *thread = arg;
7896
7897         /*
7898          * md_thread is a 'system-thread'; its priority should be very
7899          * high. We avoid resource deadlocks individually in each
7900          * raid personality. (RAID5 does preallocation) We also use RR and
7901          * the very same RT priority as kswapd, thus we will never get
7902          * into a priority inversion deadlock.
7903          *
7904          * we definitely have to have equal or higher priority than
7905          * bdflush, otherwise bdflush will deadlock if there are too
7906          * many dirty RAID5 blocks.
7907          */
7908
7909         allow_signal(SIGKILL);
7910         while (!kthread_should_stop()) {
7911
7912                 /* We need to wait INTERRUPTIBLE so that
7913                  * we don't add to the load-average.
7914                  * That means we need to be sure no signals are
7915                  * pending
7916                  */
7917                 if (signal_pending(current))
7918                         flush_signals(current);
7919
7920                 wait_event_interruptible_timeout
7921                         (thread->wqueue,
7922                          test_bit(THREAD_WAKEUP, &thread->flags)
7923                          || kthread_should_stop() || kthread_should_park(),
7924                          thread->timeout);
7925
7926                 clear_bit(THREAD_WAKEUP, &thread->flags);
7927                 if (kthread_should_park())
7928                         kthread_parkme();
7929                 if (!kthread_should_stop())
7930                         thread->run(thread);
7931         }
7932
7933         return 0;
7934 }
7935
7936 void md_wakeup_thread(struct md_thread *thread)
7937 {
7938         if (thread) {
7939                 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
7940                 set_bit(THREAD_WAKEUP, &thread->flags);
7941                 wake_up(&thread->wqueue);
7942         }
7943 }
7944 EXPORT_SYMBOL(md_wakeup_thread);
7945
7946 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7947                 struct mddev *mddev, const char *name)
7948 {
7949         struct md_thread *thread;
7950
7951         thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
7952         if (!thread)
7953                 return NULL;
7954
7955         init_waitqueue_head(&thread->wqueue);
7956
7957         thread->run = run;
7958         thread->mddev = mddev;
7959         thread->timeout = MAX_SCHEDULE_TIMEOUT;
7960         thread->tsk = kthread_run(md_thread, thread,
7961                                   "%s_%s",
7962                                   mdname(thread->mddev),
7963                                   name);
7964         if (IS_ERR(thread->tsk)) {
7965                 kfree(thread);
7966                 return NULL;
7967         }
7968         return thread;
7969 }
7970 EXPORT_SYMBOL(md_register_thread);
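     /*
      * Typical caller pattern (illustrative sketch only; a personality would
      * pass its own worker function, shown here as a hypothetical
      * my_raid_daemon()):
      *
      *	mddev->thread = md_register_thread(my_raid_daemon, mddev, "raid");
      *	if (!mddev->thread)
      *		return -ENOMEM;
      *	...
      *	md_unregister_thread(&mddev->thread);
      */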
7971
7972 void md_unregister_thread(struct md_thread **threadp)
7973 {
7974         struct md_thread *thread;
7975
7976         /*
7977          * Locking ensures that mddev_unlock does not wake_up a
7978          * non-existent thread
7979          */
7980         spin_lock(&pers_lock);
7981         thread = *threadp;
7982         if (!thread) {
7983                 spin_unlock(&pers_lock);
7984                 return;
7985         }
7986         *threadp = NULL;
7987         spin_unlock(&pers_lock);
7988
7989         pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
7990         kthread_stop(thread->tsk);
7991         kfree(thread);
7992 }
7993 EXPORT_SYMBOL(md_unregister_thread);
7994
7995 void md_error(struct mddev *mddev, struct md_rdev *rdev)
7996 {
7997         if (!rdev || test_bit(Faulty, &rdev->flags))
7998                 return;
7999
8000         if (!mddev->pers || !mddev->pers->error_handler)
8001                 return;
8002         mddev->pers->error_handler(mddev,rdev);
8003         if (mddev->degraded)
8004                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8005         sysfs_notify_dirent_safe(rdev->sysfs_state);
8006         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8007         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8008         md_wakeup_thread(mddev->thread);
8009         if (mddev->event_work.func)
8010                 queue_work(md_misc_wq, &mddev->event_work);
8011         md_new_event(mddev);
8012 }
8013 EXPORT_SYMBOL(md_error);
8014
8015 /* seq_file implementation /proc/mdstat */
8016
8017 static void status_unused(struct seq_file *seq)
8018 {
8019         int i = 0;
8020         struct md_rdev *rdev;
8021
8022         seq_printf(seq, "unused devices: ");
8023
8024         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
8025                 char b[BDEVNAME_SIZE];
8026                 i++;
8027                 seq_printf(seq, "%s ",
8028                               bdevname(rdev->bdev,b));
8029         }
8030         if (!i)
8031                 seq_printf(seq, "<none>");
8032
8033         seq_printf(seq, "\n");
8034 }
8035
8036 static int status_resync(struct seq_file *seq, struct mddev *mddev)
8037 {
8038         sector_t max_sectors, resync, res;
8039         unsigned long dt, db = 0;
8040         sector_t rt, curr_mark_cnt, resync_mark_cnt;
8041         int scale, recovery_active;
8042         unsigned int per_milli;
8043
8044         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8045             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8046                 max_sectors = mddev->resync_max_sectors;
8047         else
8048                 max_sectors = mddev->dev_sectors;
8049
8050         resync = mddev->curr_resync;
8051         if (resync <= 3) {
8052                 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
8053                         /* Still cleaning up */
8054                         resync = max_sectors;
8055         } else if (resync > max_sectors)
8056                 resync = max_sectors;
8057         else
8058                 resync -= atomic_read(&mddev->recovery_active);
8059
8060         if (resync == 0) {
8061                 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
8062                         struct md_rdev *rdev;
8063
8064                         rdev_for_each(rdev, mddev)
8065                                 if (rdev->raid_disk >= 0 &&
8066                                     !test_bit(Faulty, &rdev->flags) &&
8067                                     rdev->recovery_offset != MaxSector &&
8068                                     rdev->recovery_offset) {
8069                                         seq_printf(seq, "\trecover=REMOTE");
8070                                         return 1;
8071                                 }
8072                         if (mddev->reshape_position != MaxSector)
8073                                 seq_printf(seq, "\treshape=REMOTE");
8074                         else
8075                                 seq_printf(seq, "\tresync=REMOTE");
8076                         return 1;
8077                 }
8078                 if (mddev->recovery_cp < MaxSector) {
8079                         seq_printf(seq, "\tresync=PENDING");
8080                         return 1;
8081                 }
8082                 return 0;
8083         }
8084         if (resync < 3) {
8085                 seq_printf(seq, "\tresync=DELAYED");
8086                 return 1;
8087         }
8088
8089         WARN_ON(max_sectors == 0);
8090         /* Pick 'scale' such that (resync>>scale)*1000 will fit
8091          * in a sector_t, and (max_sectors>>scale) will fit in a
8092          * u32, as those are the requirements for sector_div.
8093          * Thus 'scale' must be at least 10
8094          */
8095         scale = 10;
8096         if (sizeof(sector_t) > sizeof(unsigned long)) {
8097                 while ( max_sectors/2 > (1ULL<<(scale+32)))
8098                         scale++;
8099         }
8100         res = (resync>>scale)*1000;
8101         sector_div(res, (u32)((max_sectors>>scale)+1));
8102
8103         per_milli = res;
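             /*
              * per_milli is progress in tenths of a percent; the block below
              * draws a 20-slot bar with one '=' per completed 5%.
              */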
8104         {
8105                 int i, x = per_milli/50, y = 20-x;
8106                 seq_printf(seq, "[");
8107                 for (i = 0; i < x; i++)
8108                         seq_printf(seq, "=");
8109                 seq_printf(seq, ">");
8110                 for (i = 0; i < y; i++)
8111                         seq_printf(seq, ".");
8112                 seq_printf(seq, "] ");
8113         }
8114         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
8115                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
8116                     "reshape" :
8117                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
8118                      "check" :
8119                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
8120                       "resync" : "recovery"))),
8121                    per_milli/10, per_milli % 10,
8122                    (unsigned long long) resync/2,
8123                    (unsigned long long) max_sectors/2);
8124
8125         /*
8126          * dt: time from mark until now
8127          * db: blocks written from mark until now
8128          * rt: remaining time
8129          *
8130          * rt is a sector_t, which is always 64bit now. We are keeping
8131          * the original algorithm, but it is not really necessary.
8132          *
8133          * Original algorithm:
8134          *   So we divide before multiply in case it is 32bit and close
8135          *   to the limit.
8136          *   We scale the divisor (db) by 32 to avoid losing precision
8137          *   near the end of resync when the number of remaining sectors
8138          *   is close to 'db'.
8139          *   We then divide rt by 32 after multiplying by db to compensate.
8140          *   The '+1' avoids division by zero if db is very small.
8141          */
8142         dt = ((jiffies - mddev->resync_mark) / HZ);
8143         if (!dt) dt++;
8144
8145         curr_mark_cnt = mddev->curr_mark_cnt;
8146         recovery_active = atomic_read(&mddev->recovery_active);
8147         resync_mark_cnt = mddev->resync_mark_cnt;
8148
8149         if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
8150                 db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
8151
8152         rt = max_sectors - resync;    /* number of remaining sectors */
8153         rt = div64_u64(rt, db/32+1);
8154         rt *= dt;
8155         rt >>= 5;
8156
8157         seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
8158                    ((unsigned long)rt % 60)/6);
8159
8160         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
8161         return 1;
8162 }
8163
8164 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
8165 {
8166         struct list_head *tmp;
8167         loff_t l = *pos;
8168         struct mddev *mddev;
8169
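             /*
              * Iterator cookies: (void *)1 is the header line, (void *)2 is
              * the trailing "unused devices" line; anything else is an mddev.
              */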
8170         if (l == 0x10000) {
8171                 ++*pos;
8172                 return (void *)2;
8173         }
8174         if (l > 0x10000)
8175                 return NULL;
8176         if (!l--)
8177                 /* header */
8178                 return (void*)1;
8179
8180         spin_lock(&all_mddevs_lock);
8181         list_for_each(tmp,&all_mddevs)
8182                 if (!l--) {
8183                         mddev = list_entry(tmp, struct mddev, all_mddevs);
8184                         mddev_get(mddev);
8185                         spin_unlock(&all_mddevs_lock);
8186                         return mddev;
8187                 }
8188         spin_unlock(&all_mddevs_lock);
8189         if (!l--)
8190                 return (void*)2;/* tail */
8191         return NULL;
8192 }
8193
8194 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8195 {
8196         struct list_head *tmp;
8197         struct mddev *next_mddev, *mddev = v;
8198
8199         ++*pos;
8200         if (v == (void*)2)
8201                 return NULL;
8202
8203         spin_lock(&all_mddevs_lock);
8204         if (v == (void*)1)
8205                 tmp = all_mddevs.next;
8206         else
8207                 tmp = mddev->all_mddevs.next;
8208         if (tmp != &all_mddevs)
8209                 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
8210         else {
8211                 next_mddev = (void*)2;
8212                 *pos = 0x10000;
8213         }
8214         spin_unlock(&all_mddevs_lock);
8215
8216         if (v != (void*)1)
8217                 mddev_put(mddev);
8218         return next_mddev;
8219
8220 }
8221
8222 static void md_seq_stop(struct seq_file *seq, void *v)
8223 {
8224         struct mddev *mddev = v;
8225
8226         if (mddev && v != (void*)1 && v != (void*)2)
8227                 mddev_put(mddev);
8228 }
8229
8230 static int md_seq_show(struct seq_file *seq, void *v)
8231 {
8232         struct mddev *mddev = v;
8233         sector_t sectors;
8234         struct md_rdev *rdev;
8235
8236         if (v == (void*)1) {
8237                 struct md_personality *pers;
8238                 seq_printf(seq, "Personalities : ");
8239                 spin_lock(&pers_lock);
8240                 list_for_each_entry(pers, &pers_list, list)
8241                         seq_printf(seq, "[%s] ", pers->name);
8242
8243                 spin_unlock(&pers_lock);
8244                 seq_printf(seq, "\n");
8245                 seq->poll_event = atomic_read(&md_event_count);
8246                 return 0;
8247         }
8248         if (v == (void*)2) {
8249                 status_unused(seq);
8250                 return 0;
8251         }
8252
8253         spin_lock(&mddev->lock);
8254         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
8255                 seq_printf(seq, "%s : %sactive", mdname(mddev),
8256                                                 mddev->pers ? "" : "in");
8257                 if (mddev->pers) {
8258                         if (mddev->ro==1)
8259                                 seq_printf(seq, " (read-only)");
8260                         if (mddev->ro==2)
8261                                 seq_printf(seq, " (auto-read-only)");
8262                         seq_printf(seq, " %s", mddev->pers->name);
8263                 }
8264
8265                 sectors = 0;
8266                 rcu_read_lock();
8267                 rdev_for_each_rcu(rdev, mddev) {
8268                         char b[BDEVNAME_SIZE];
8269                         seq_printf(seq, " %s[%d]",
8270                                 bdevname(rdev->bdev,b), rdev->desc_nr);
8271                         if (test_bit(WriteMostly, &rdev->flags))
8272                                 seq_printf(seq, "(W)");
8273                         if (test_bit(Journal, &rdev->flags))
8274                                 seq_printf(seq, "(J)");
8275                         if (test_bit(Faulty, &rdev->flags)) {
8276                                 seq_printf(seq, "(F)");
8277                                 continue;
8278                         }
8279                         if (rdev->raid_disk < 0)
8280                                 seq_printf(seq, "(S)"); /* spare */
8281                         if (test_bit(Replacement, &rdev->flags))
8282                                 seq_printf(seq, "(R)");
8283                         sectors += rdev->sectors;
8284                 }
8285                 rcu_read_unlock();
8286
8287                 if (!list_empty(&mddev->disks)) {
8288                         if (mddev->pers)
8289                                 seq_printf(seq, "\n      %llu blocks",
8290                                            (unsigned long long)
8291                                            mddev->array_sectors / 2);
8292                         else
8293                                 seq_printf(seq, "\n      %llu blocks",
8294                                            (unsigned long long)sectors / 2);
8295                 }
8296                 if (mddev->persistent) {
8297                         if (mddev->major_version != 0 ||
8298                             mddev->minor_version != 90) {
8299                                 seq_printf(seq," super %d.%d",
8300                                            mddev->major_version,
8301                                            mddev->minor_version);
8302                         }
8303                 } else if (mddev->external)
8304                         seq_printf(seq, " super external:%s",
8305                                    mddev->metadata_type);
8306                 else
8307                         seq_printf(seq, " super non-persistent");
8308
8309                 if (mddev->pers) {
8310                         mddev->pers->status(seq, mddev);
8311                         seq_printf(seq, "\n      ");
8312                         if (mddev->pers->sync_request) {
8313                                 if (status_resync(seq, mddev))
8314                                         seq_printf(seq, "\n      ");
8315                         }
8316                 } else
8317                         seq_printf(seq, "\n       ");
8318
8319                 md_bitmap_status(seq, mddev->bitmap);
8320
8321                 seq_printf(seq, "\n");
8322         }
8323         spin_unlock(&mddev->lock);
8324
8325         return 0;
8326 }
8327
8328 static const struct seq_operations md_seq_ops = {
8329         .start  = md_seq_start,
8330         .next   = md_seq_next,
8331         .stop   = md_seq_stop,
8332         .show   = md_seq_show,
8333 };
8334
8335 static int md_seq_open(struct inode *inode, struct file *file)
8336 {
8337         struct seq_file *seq;
8338         int error;
8339
8340         error = seq_open(file, &md_seq_ops);
8341         if (error)
8342                 return error;
8343
8344         seq = file->private_data;
8345         seq->poll_event = atomic_read(&md_event_count);
8346         return error;
8347 }
8348
8349 static int md_unloading;
8350 static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
8351 {
8352         struct seq_file *seq = filp->private_data;
8353         __poll_t mask;
8354
8355         if (md_unloading)
8356                 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
8357         poll_wait(filp, &md_event_waiters, wait);
8358
8359         /* always allow read */
8360         mask = EPOLLIN | EPOLLRDNORM;
8361
8362         if (seq->poll_event != atomic_read(&md_event_count))
8363                 mask |= EPOLLERR | EPOLLPRI;
8364         return mask;
8365 }
8366
8367 static const struct proc_ops mdstat_proc_ops = {
8368         .proc_open      = md_seq_open,
8369         .proc_read      = seq_read,
8370         .proc_lseek     = seq_lseek,
8371         .proc_release   = seq_release,
8372         .proc_poll      = mdstat_poll,
8373 };
8374
8375 int register_md_personality(struct md_personality *p)
8376 {
8377         pr_debug("md: %s personality registered for level %d\n",
8378                  p->name, p->level);
8379         spin_lock(&pers_lock);
8380         list_add_tail(&p->list, &pers_list);
8381         spin_unlock(&pers_lock);
8382         return 0;
8383 }
8384 EXPORT_SYMBOL(register_md_personality);
8385
8386 int unregister_md_personality(struct md_personality *p)
8387 {
8388         pr_debug("md: %s personality unregistered\n", p->name);
8389         spin_lock(&pers_lock);
8390         list_del_init(&p->list);
8391         spin_unlock(&pers_lock);
8392         return 0;
8393 }
8394 EXPORT_SYMBOL(unregister_md_personality);
8395
8396 int register_md_cluster_operations(struct md_cluster_operations *ops,
8397                                    struct module *module)
8398 {
8399         int ret = 0;
8400         spin_lock(&pers_lock);
8401         if (md_cluster_ops != NULL)
8402                 ret = -EALREADY;
8403         else {
8404                 md_cluster_ops = ops;
8405                 md_cluster_mod = module;
8406         }
8407         spin_unlock(&pers_lock);
8408         return ret;
8409 }
8410 EXPORT_SYMBOL(register_md_cluster_operations);
8411
8412 int unregister_md_cluster_operations(void)
8413 {
8414         spin_lock(&pers_lock);
8415         md_cluster_ops = NULL;
8416         spin_unlock(&pers_lock);
8417         return 0;
8418 }
8419 EXPORT_SYMBOL(unregister_md_cluster_operations);
8420
8421 int md_setup_cluster(struct mddev *mddev, int nodes)
8422 {
8423         int ret;
8424         if (!md_cluster_ops)
8425                 request_module("md-cluster");
8426         spin_lock(&pers_lock);
8427         /* ensure module won't be unloaded */
8428         if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
8429                 pr_warn("can't find md-cluster module or get its reference.\n");
8430                 spin_unlock(&pers_lock);
8431                 return -ENOENT;
8432         }
8433         spin_unlock(&pers_lock);
8434
8435         ret = md_cluster_ops->join(mddev, nodes);
8436         if (!ret)
8437                 mddev->safemode_delay = 0;
8438         return ret;
8439 }
8440
8441 void md_cluster_stop(struct mddev *mddev)
8442 {
8443         if (!md_cluster_ops)
8444                 return;
8445         md_cluster_ops->leave(mddev);
8446         module_put(md_cluster_mod);
8447 }
8448
8449 static int is_mddev_idle(struct mddev *mddev, int init)
8450 {
8451         struct md_rdev *rdev;
8452         int idle;
8453         int curr_events;
8454
8455         idle = 1;
8456         rcu_read_lock();
8457         rdev_for_each_rcu(rdev, mddev) {
8458                 struct gendisk *disk = rdev->bdev->bd_disk;
8459                 curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
8460                               atomic_read(&disk->sync_io);
8461                 /* sync IO will cause sync_io to increase before the disk_stats
8462                  * as sync_io is counted when a request starts, and
8463                  * disk_stats is counted when it completes.
8464                  * So resync activity will cause curr_events to be smaller than
8465                  * when there was no such activity.
8466                  * non-sync IO will cause disk_stats to increase without
8467                  * increasing sync_io so curr_events will (eventually)
8468                  * be larger than it was before.  Once it becomes
8469                  * substantially larger, the test below will cause
8470                  * the array to appear non-idle, and resync will slow
8471                  * down.
8472                  * If there is a lot of outstanding resync activity when
8473                  * we set last_events to curr_events, then all that activity
8474                  * completing might cause the array to appear non-idle
8475                  * and resync will be slowed down even though there might
8476                  * not have been non-resync activity.  This will only
8477                  * happen once though.  'last_events' will soon reflect
8478                  * the state where there is little or no outstanding
8479                  * resync requests, and further resync activity will
8480                  * always make curr_events less than last_events.
8481                  *
8482                  */
8483                 if (init || curr_events - rdev->last_events > 64) {
8484                         rdev->last_events = curr_events;
8485                         idle = 0;
8486                 }
8487         }
8488         rcu_read_unlock();
8489         return idle;
8490 }
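
/*
 * Worked example of the heuristic above (illustrative numbers, not taken from
 * the source): suppose that between two calls a member disk completed 2048
 * sectors in total while the resync path accounted for 1900 of them through
 * sync_io.  curr_events then grows by roughly 148 sectors of non-resync IO;
 * since 148 > 64 the device is treated as busy, idle becomes 0 and
 * md_do_sync() backs off towards speed_min().  Purely resync-driven IO
 * typically keeps the difference within the 64-sector slack, so the array
 * still looks idle.
 */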
8491
8492 void md_done_sync(struct mddev *mddev, int blocks, int ok)
8493 {
8494         /* another "blocks" (512-byte) blocks have been synced */
8495         atomic_sub(blocks, &mddev->recovery_active);
8496         wake_up(&mddev->recovery_wait);
8497         if (!ok) {
8498                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8499                 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
8500                 md_wakeup_thread(mddev->thread);
8501                 // stop recovery, signal do_sync ....
8502         }
8503 }
8504 EXPORT_SYMBOL(md_done_sync);
8505
8506 /* md_write_start(mddev, bi)
8507  * If we need to update some array metadata (e.g. 'active' flag
8508  * in superblock) before writing, schedule a superblock update
8509  * and wait for it to complete.
8510  * A return value of 'false' means that the write wasn't recorded
8511  * and cannot proceed as the array is being suspended.
8512  */
8513 bool md_write_start(struct mddev *mddev, struct bio *bi)
8514 {
8515         int did_change = 0;
8516
8517         if (bio_data_dir(bi) != WRITE)
8518                 return true;
8519
8520         BUG_ON(mddev->ro == 1);
8521         if (mddev->ro == 2) {
8522                 /* need to switch to read/write */
8523                 mddev->ro = 0;
8524                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8525                 md_wakeup_thread(mddev->thread);
8526                 md_wakeup_thread(mddev->sync_thread);
8527                 did_change = 1;
8528         }
8529         rcu_read_lock();
8530         percpu_ref_get(&mddev->writes_pending);
8531         smp_mb(); /* Match smp_mb in set_in_sync() */
8532         if (mddev->safemode == 1)
8533                 mddev->safemode = 0;
8534         /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
8535         if (mddev->in_sync || mddev->sync_checkers) {
8536                 spin_lock(&mddev->lock);
8537                 if (mddev->in_sync) {
8538                         mddev->in_sync = 0;
8539                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8540                         set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8541                         md_wakeup_thread(mddev->thread);
8542                         did_change = 1;
8543                 }
8544                 spin_unlock(&mddev->lock);
8545         }
8546         rcu_read_unlock();
8547         if (did_change)
8548                 sysfs_notify_dirent_safe(mddev->sysfs_state);
8549         if (!mddev->has_superblocks)
8550                 return true;
8551         wait_event(mddev->sb_wait,
8552                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
8553                    mddev->suspended);
8554         if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
8555                 percpu_ref_put(&mddev->writes_pending);
8556                 return false;
8557         }
8558         return true;
8559 }
8560 EXPORT_SYMBOL(md_write_start);
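
/*
 * Illustrative sketch (assumed caller, not code from this file): a
 * personality's request path is expected to bracket array writes with
 * md_write_start() and md_write_end().  The function name and flow below are
 * invented for the example.
 *
 *	static bool my_make_request(struct mddev *mddev, struct bio *bio)
 *	{
 *		if (!md_write_start(mddev, bio))
 *			return false;	// array suspended, write not recorded
 *
 *		// ... map and submit the write to the member devices ...
 *
 *		// and from the completion path, once the array write is done:
 *		md_write_end(mddev);
 *		return true;
 *	}
 */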
8561
8562 /* md_write_inc can only be called when md_write_start() has
8563  * already been called at least once on the current request.
8564  * It increments the counter and is useful when a single request
8565  * is split into several parts.  Each part causes an increment and
8566  * so needs a matching md_write_end().
8567  * Unlike md_write_start(), it is safe to call md_write_inc() inside
8568  * a spinlocked region.
8569  */
8570 void md_write_inc(struct mddev *mddev, struct bio *bi)
8571 {
8572         if (bio_data_dir(bi) != WRITE)
8573                 return;
8574         WARN_ON_ONCE(mddev->in_sync || mddev->ro);
8575         percpu_ref_get(&mddev->writes_pending);
8576 }
8577 EXPORT_SYMBOL(md_write_inc);
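
/*
 * Illustrative sketch: if a personality splits one array write into several
 * parts after md_write_start() succeeded, each additional part takes its own
 * reference and each part drops one with md_write_end().  The loop below is
 * an assumption for the example only.
 *
 *	// the first part is already covered by md_write_start()
 *	for (i = 1; i < nr_parts; i++)
 *		md_write_inc(mddev, bio);	// safe inside a spinlocked region
 *	// ... every completed part then calls md_write_end(mddev) ...
 */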
8578
8579 void md_write_end(struct mddev *mddev)
8580 {
8581         percpu_ref_put(&mddev->writes_pending);
8582
8583         if (mddev->safemode == 2)
8584                 md_wakeup_thread(mddev->thread);
8585         else if (mddev->safemode_delay)
8586                 /* The roundup() ensures this only performs locking once
8587                  * every ->safemode_delay jiffies
8588                  */
8589                 mod_timer(&mddev->safemode_timer,
8590                           roundup(jiffies, mddev->safemode_delay) +
8591                           mddev->safemode_delay);
8592 }
8593
8594 EXPORT_SYMBOL(md_write_end);
8595
8596 /* md_allow_write(mddev)
8597  * Calling this ensures that the array is marked 'active' so that writes
8598  * may proceed without blocking.  It is important to call this before
8599  * attempting a GFP_KERNEL allocation while holding the mddev lock.
8600  * Must be called with mddev_lock held.
8601  */
8602 void md_allow_write(struct mddev *mddev)
8603 {
8604         if (!mddev->pers)
8605                 return;
8606         if (mddev->ro)
8607                 return;
8608         if (!mddev->pers->sync_request)
8609                 return;
8610
8611         spin_lock(&mddev->lock);
8612         if (mddev->in_sync) {
8613                 mddev->in_sync = 0;
8614                 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8615                 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8616                 if (mddev->safemode_delay &&
8617                     mddev->safemode == 0)
8618                         mddev->safemode = 1;
8619                 spin_unlock(&mddev->lock);
8620                 md_update_sb(mddev, 0);
8621                 sysfs_notify_dirent_safe(mddev->sysfs_state);
8622                 /* wait for the dirty state to be recorded in the metadata */
8623                 wait_event(mddev->sb_wait,
8624                            !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
8625         } else
8626                 spin_unlock(&mddev->lock);
8627 }
8628 EXPORT_SYMBOL_GPL(md_allow_write);
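
/*
 * Illustrative sketch (hypothetical caller): per the comment above, the array
 * is marked active before a GFP_KERNEL allocation is attempted while the
 * mddev lock is held, e.g.:
 *
 *	// reconfig_mutex already held via mddev_lock()
 *	md_allow_write(mddev);
 *	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
 *	if (!new_conf)
 *		return -ENOMEM;
 */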
8629
8630 #define SYNC_MARKS      10
8631 #define SYNC_MARK_STEP  (3*HZ)
8632 #define UPDATE_FREQUENCY (5*60*HZ)
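/*
 * Rough meaning of the tunables above, as they are used below: md_do_sync()
 * keeps SYNC_MARKS rotating samples stepped every SYNC_MARK_STEP (3 seconds),
 * so the reported resync speed is averaged over roughly the last 30 seconds,
 * and curr_resync_completed is pushed out to the metadata at least about
 * every UPDATE_FREQUENCY (5 minutes).
 */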
8633 void md_do_sync(struct md_thread *thread)
8634 {
8635         struct mddev *mddev = thread->mddev;
8636         struct mddev *mddev2;
8637         unsigned int currspeed = 0, window;
8638         sector_t max_sectors,j, io_sectors, recovery_done;
8639         unsigned long mark[SYNC_MARKS];
8640         unsigned long update_time;
8641         sector_t mark_cnt[SYNC_MARKS];
8642         int last_mark,m;
8643         struct list_head *tmp;
8644         sector_t last_check;
8645         int skipped = 0;
8646         struct md_rdev *rdev;
8647         char *desc, *action = NULL;
8648         struct blk_plug plug;
8649         int ret;
8650
8651         /* just in case the thread restarts... */
8652         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8653             test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
8654                 return;
8655         if (mddev->ro) {/* never try to sync a read-only array */
8656                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8657                 return;
8658         }
8659
8660         if (mddev_is_clustered(mddev)) {
8661                 ret = md_cluster_ops->resync_start(mddev);
8662                 if (ret)
8663                         goto skip;
8664
8665                 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
8666                 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8667                         test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
8668                         test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
8669                      && ((unsigned long long)mddev->curr_resync_completed
8670                          < (unsigned long long)mddev->resync_max_sectors))
8671                         goto skip;
8672         }
8673
8674         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8675                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
8676                         desc = "data-check";
8677                         action = "check";
8678                 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8679                         desc = "requested-resync";
8680                         action = "repair";
8681                 } else
8682                         desc = "resync";
8683         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8684                 desc = "reshape";
8685         else
8686                 desc = "recovery";
8687
8688         mddev->last_sync_action = action ?: desc;
8689
8690         /* we overload curr_resync somewhat here.
8691          * 0 == not engaged in resync at all
8692          * 2 == checking that there is no conflict with another sync
8693          * 1 == like 2, but have yielded to allow conflicting resync to
8694          *              commence
8695          * other == active in resync - this many blocks
8696          *
8697          * Before starting a resync we must have set curr_resync to
8698          * 2, and then checked that every "conflicting" array has curr_resync
8699          * less than ours.  When we find one that is the same or higher
8700          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
8701          * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
8702          * This will mean we have to start checking from the beginning again.
8703          *
8704          */
8705
8706         do {
8707                 int mddev2_minor = -1;
8708                 mddev->curr_resync = 2;
8709
8710         try_again:
8711                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8712                         goto skip;
8713                 for_each_mddev(mddev2, tmp) {
8714                         if (mddev2 == mddev)
8715                                 continue;
8716                         if (!mddev->parallel_resync
8717                         &&  mddev2->curr_resync
8718                         &&  match_mddev_units(mddev, mddev2)) {
8719                                 DEFINE_WAIT(wq);
8720                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
8721                                         /* arbitrarily yield */
8722                                         mddev->curr_resync = 1;
8723                                         wake_up(&resync_wait);
8724                                 }
8725                                 if (mddev > mddev2 && mddev->curr_resync == 1)
8726                                         /* no need to wait here, we can wait the next
8727                                          * time 'round when curr_resync == 2
8728                                          */
8729                                         continue;
8730                                 /* We need to wait 'interruptible' so as not to
8731                                  * contribute to the load average, and not to
8732                                  * be caught by 'softlockup'
8733                                  */
8734                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
8735                                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8736                                     mddev2->curr_resync >= mddev->curr_resync) {
8737                                         if (mddev2_minor != mddev2->md_minor) {
8738                                                 mddev2_minor = mddev2->md_minor;
8739                                                 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
8740                                                         desc, mdname(mddev),
8741                                                         mdname(mddev2));
8742                                         }
8743                                         mddev_put(mddev2);
8744                                         if (signal_pending(current))
8745                                                 flush_signals(current);
8746                                         schedule();
8747                                         finish_wait(&resync_wait, &wq);
8748                                         goto try_again;
8749                                 }
8750                                 finish_wait(&resync_wait, &wq);
8751                         }
8752                 }
8753         } while (mddev->curr_resync < 2);
8754
8755         j = 0;
8756         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8757                 /* resync follows the size requested by the personality,
8758                  * which defaults to physical size, but can be virtual size
8759                  */
8760                 max_sectors = mddev->resync_max_sectors;
8761                 atomic64_set(&mddev->resync_mismatches, 0);
8762                 /* we don't use the checkpoint if there's a bitmap */
8763                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8764                         j = mddev->resync_min;
8765                 else if (!mddev->bitmap)
8766                         j = mddev->recovery_cp;
8767
8768         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
8769                 max_sectors = mddev->resync_max_sectors;
8770                 /*
8771                  * If the original node aborts reshaping then we continue the
8772                  * reshaping, so set j again to avoid restarting the reshape
8773                  * from the very beginning
8774                  */
8775                 if (mddev_is_clustered(mddev) &&
8776                     mddev->reshape_position != MaxSector)
8777                         j = mddev->reshape_position;
8778         } else {
8779                 /* recovery follows the physical size of devices */
8780                 max_sectors = mddev->dev_sectors;
8781                 j = MaxSector;
8782                 rcu_read_lock();
8783                 rdev_for_each_rcu(rdev, mddev)
8784                         if (rdev->raid_disk >= 0 &&
8785                             !test_bit(Journal, &rdev->flags) &&
8786                             !test_bit(Faulty, &rdev->flags) &&
8787                             !test_bit(In_sync, &rdev->flags) &&
8788                             rdev->recovery_offset < j)
8789                                 j = rdev->recovery_offset;
8790                 rcu_read_unlock();
8791
8792                 /* If there is a bitmap, we need to make sure all
8793                  * writes that started before we added a spare
8794                  * complete before we start doing a recovery.
8795                  * Otherwise the write might complete and (via
8796                  * bitmap_endwrite) set a bit in the bitmap after the
8797                  * recovery has checked that bit and skipped that
8798                  * region.
8799                  */
8800                 if (mddev->bitmap) {
8801                         mddev->pers->quiesce(mddev, 1);
8802                         mddev->pers->quiesce(mddev, 0);
8803                 }
8804         }
8805
8806         pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8807         pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
8808         pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8809                  speed_max(mddev), desc);
8810
8811         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
8812
8813         io_sectors = 0;
8814         for (m = 0; m < SYNC_MARKS; m++) {
8815                 mark[m] = jiffies;
8816                 mark_cnt[m] = io_sectors;
8817         }
8818         last_mark = 0;
8819         mddev->resync_mark = mark[last_mark];
8820         mddev->resync_mark_cnt = mark_cnt[last_mark];
8821
8822         /*
8823          * Tune reconstruction:
8824          */
8825         window = 32 * (PAGE_SIZE / 512);
8826         pr_debug("md: using %dk window, over a total of %lluk.\n",
8827                  window/2, (unsigned long long)max_sectors/2);
8828
8829         atomic_set(&mddev->recovery_active, 0);
8830         last_check = 0;
8831
8832         if (j>2) {
8833                 pr_debug("md: resuming %s of %s from checkpoint.\n",
8834                          desc, mdname(mddev));
8835                 mddev->curr_resync = j;
8836         } else
8837                 mddev->curr_resync = 3; /* no longer delayed */
8838         mddev->curr_resync_completed = j;
8839         sysfs_notify_dirent_safe(mddev->sysfs_completed);
8840         md_new_event(mddev);
8841         update_time = jiffies;
8842
8843         blk_start_plug(&plug);
8844         while (j < max_sectors) {
8845                 sector_t sectors;
8846
8847                 skipped = 0;
8848
8849                 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8850                     ((mddev->curr_resync > mddev->curr_resync_completed &&
8851                       (mddev->curr_resync - mddev->curr_resync_completed)
8852                       > (max_sectors >> 4)) ||
8853                      time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
8854                      (j - mddev->curr_resync_completed)*2
8855                      >= mddev->resync_max - mddev->curr_resync_completed ||
8856                      mddev->curr_resync_completed > mddev->resync_max
8857                             )) {
8858                         /* time to update curr_resync_completed */
8859                         wait_event(mddev->recovery_wait,
8860                                    atomic_read(&mddev->recovery_active) == 0);
8861                         mddev->curr_resync_completed = j;
8862                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8863                             j > mddev->recovery_cp)
8864                                 mddev->recovery_cp = j;
8865                         update_time = jiffies;
8866                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8867                         sysfs_notify_dirent_safe(mddev->sysfs_completed);
8868                 }
8869
8870                 while (j >= mddev->resync_max &&
8871                        !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8872                         /* As this condition is controlled by user-space,
8873                          * we can block indefinitely, so use '_interruptible'
8874                          * to avoid triggering warnings.
8875                          */
8876                         flush_signals(current); /* just in case */
8877                         wait_event_interruptible(mddev->recovery_wait,
8878                                                  mddev->resync_max > j
8879                                                  || test_bit(MD_RECOVERY_INTR,
8880                                                              &mddev->recovery));
8881                 }
8882
8883                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8884                         break;
8885
8886                 sectors = mddev->pers->sync_request(mddev, j, &skipped);
8887                 if (sectors == 0) {
8888                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8889                         break;
8890                 }
8891
8892                 if (!skipped) { /* actual IO requested */
8893                         io_sectors += sectors;
8894                         atomic_add(sectors, &mddev->recovery_active);
8895                 }
8896
8897                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8898                         break;
8899
8900                 j += sectors;
8901                 if (j > max_sectors)
8902                         /* when skipping, extra large numbers can be returned. */
8903                         j = max_sectors;
8904                 if (j > 2)
8905                         mddev->curr_resync = j;
8906                 mddev->curr_mark_cnt = io_sectors;
8907                 if (last_check == 0)
8908                         /* this is the earliest that rebuild will be
8909                          * visible in /proc/mdstat
8910                          */
8911                         md_new_event(mddev);
8912
8913                 if (last_check + window > io_sectors || j == max_sectors)
8914                         continue;
8915
8916                 last_check = io_sectors;
8917         repeat:
8918                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8919                         /* step marks */
8920                         int next = (last_mark+1) % SYNC_MARKS;
8921
8922                         mddev->resync_mark = mark[next];
8923                         mddev->resync_mark_cnt = mark_cnt[next];
8924                         mark[next] = jiffies;
8925                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
8926                         last_mark = next;
8927                 }
8928
8929                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8930                         break;
8931
8932                 /*
8933                  * this loop exits only when we are slower than
8934                  * the 'hard' speed limit, or the system was IO-idle for
8935                  * a jiffy.
8936                  * the system might be non-idle CPU-wise, but we only care
8937                  * about not overloading the IO subsystem. (things like an
8938                  * e2fsck being done on the RAID array should execute fast)
8939                  */
8940                 cond_resched();
8941
8942                 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8943                 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
8944                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
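                /*
                 * Worked example of the formula above (illustrative numbers,
                 * not from the source): with HZ == 1000, if 20480 sectors
                 * (10 MiB) have completed since resync_mark and 2000 jiffies
                 * (2 seconds) have elapsed, currspeed = (20480 / 2) / (2 + 1)
                 * + 1 = 3414 KB/sec; the /2 converts sectors to KB, the +1 in
                 * the divisor avoids a division by zero and the trailing +1
                 * keeps the result non-zero.
                 */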
8945
8946                 if (currspeed > speed_min(mddev)) {
8947                         if (currspeed > speed_max(mddev)) {
8948                                 msleep(500);
8949                                 goto repeat;
8950                         }
8951                         if (!is_mddev_idle(mddev, 0)) {
8952                                 /*
8953                                  * Give other IO more of a chance.
8954                                  * The faster the devices, the less we wait.
8955                                  */
8956                                 wait_event(mddev->recovery_wait,
8957                                            !atomic_read(&mddev->recovery_active));
8958                         }
8959                 }
8960         }
8961         pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
8962                 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8963                 ? "interrupted" : "done");
8964         /*
8965          * this also signals 'finished resyncing' to md_stop
8966          */
8967         blk_finish_plug(&plug);
8968         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8969
8970         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8971             !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8972             mddev->curr_resync > 3) {
8973                 mddev->curr_resync_completed = mddev->curr_resync;
8974                 sysfs_notify_dirent_safe(mddev->sysfs_completed);
8975         }
8976         mddev->pers->sync_request(mddev, max_sectors, &skipped);
8977
8978         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
8979             mddev->curr_resync > 3) {
8980                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8981                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8982                                 if (mddev->curr_resync >= mddev->recovery_cp) {
8983                                         pr_debug("md: checkpointing %s of %s.\n",
8984                                                  desc, mdname(mddev));
8985                                         if (test_bit(MD_RECOVERY_ERROR,
8986                                                 &mddev->recovery))
8987                                                 mddev->recovery_cp =
8988                                                         mddev->curr_resync_completed;
8989                                         else
8990                                                 mddev->recovery_cp =
8991                                                         mddev->curr_resync;
8992                                 }
8993                         } else
8994                                 mddev->recovery_cp = MaxSector;
8995                 } else {
8996                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8997                                 mddev->curr_resync = MaxSector;
8998                         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8999                             test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
9000                                 rcu_read_lock();
9001                                 rdev_for_each_rcu(rdev, mddev)
9002                                         if (rdev->raid_disk >= 0 &&
9003                                             mddev->delta_disks >= 0 &&
9004                                             !test_bit(Journal, &rdev->flags) &&
9005                                             !test_bit(Faulty, &rdev->flags) &&
9006                                             !test_bit(In_sync, &rdev->flags) &&
9007                                             rdev->recovery_offset < mddev->curr_resync)
9008                                                 rdev->recovery_offset = mddev->curr_resync;
9009                                 rcu_read_unlock();
9010                         }
9011                 }
9012         }
9013  skip:
9014         /* set CHANGE_PENDING here since another update may be needed,
9015          * so that other nodes are informed. It should be harmless for
9016          * normal raid */
9017         set_mask_bits(&mddev->sb_flags, 0,
9018                       BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
9019
9020         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9021                         !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9022                         mddev->delta_disks > 0 &&
9023                         mddev->pers->finish_reshape &&
9024                         mddev->pers->size &&
9025                         mddev->queue) {
9026                 mddev_lock_nointr(mddev);
9027                 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
9028                 mddev_unlock(mddev);
9029                 if (!mddev_is_clustered(mddev)) {
9030                         set_capacity(mddev->gendisk, mddev->array_sectors);
9031                         revalidate_disk_size(mddev->gendisk, true);
9032                 }
9033         }
9034
9035         spin_lock(&mddev->lock);
9036         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9037                 /* We completed so min/max setting can be forgotten if used. */
9038                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9039                         mddev->resync_min = 0;
9040                 mddev->resync_max = MaxSector;
9041         } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9042                 mddev->resync_min = mddev->curr_resync_completed;
9043         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
9044         mddev->curr_resync = 0;
9045         spin_unlock(&mddev->lock);
9046
9047         wake_up(&resync_wait);
9048         md_wakeup_thread(mddev->thread);
9049         return;
9050 }
9051 EXPORT_SYMBOL_GPL(md_do_sync);
9052
9053 static int remove_and_add_spares(struct mddev *mddev,
9054                                  struct md_rdev *this)
9055 {
9056         struct md_rdev *rdev;
9057         int spares = 0;
9058         int removed = 0;
9059         bool remove_some = false;
9060
9061         if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
9062                 /* Mustn't remove devices when resync thread is running */
9063                 return 0;
9064
9065         rdev_for_each(rdev, mddev) {
9066                 if ((this == NULL || rdev == this) &&
9067                     rdev->raid_disk >= 0 &&
9068                     !test_bit(Blocked, &rdev->flags) &&
9069                     test_bit(Faulty, &rdev->flags) &&
9070                     atomic_read(&rdev->nr_pending)==0) {
9071                         /* Faulty non-Blocked devices with nr_pending == 0
9072                          * never get nr_pending incremented,
9073                          * never get Faulty cleared, and never get Blocked set.
9074                          * So we can synchronize_rcu now rather than once per device
9075                          */
9076                         remove_some = true;
9077                         set_bit(RemoveSynchronized, &rdev->flags);
9078                 }
9079         }
9080
9081         if (remove_some)
9082                 synchronize_rcu();
9083         rdev_for_each(rdev, mddev) {
9084                 if ((this == NULL || rdev == this) &&
9085                     rdev->raid_disk >= 0 &&
9086                     !test_bit(Blocked, &rdev->flags) &&
9087                     ((test_bit(RemoveSynchronized, &rdev->flags) ||
9088                      (!test_bit(In_sync, &rdev->flags) &&
9089                       !test_bit(Journal, &rdev->flags))) &&
9090                     atomic_read(&rdev->nr_pending)==0)) {
9091                         if (mddev->pers->hot_remove_disk(
9092                                     mddev, rdev) == 0) {
9093                                 sysfs_unlink_rdev(mddev, rdev);
9094                                 rdev->saved_raid_disk = rdev->raid_disk;
9095                                 rdev->raid_disk = -1;
9096                                 removed++;
9097                         }
9098                 }
9099                 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
9100                         clear_bit(RemoveSynchronized, &rdev->flags);
9101         }
9102
9103         if (removed && mddev->kobj.sd)
9104                 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9105
9106         if (this && removed)
9107                 goto no_add;
9108
9109         rdev_for_each(rdev, mddev) {
9110                 if (this && this != rdev)
9111                         continue;
9112                 if (test_bit(Candidate, &rdev->flags))
9113                         continue;
9114                 if (rdev->raid_disk >= 0 &&
9115                     !test_bit(In_sync, &rdev->flags) &&
9116                     !test_bit(Journal, &rdev->flags) &&
9117                     !test_bit(Faulty, &rdev->flags))
9118                         spares++;
9119                 if (rdev->raid_disk >= 0)
9120                         continue;
9121                 if (test_bit(Faulty, &rdev->flags))
9122                         continue;
9123                 if (!test_bit(Journal, &rdev->flags)) {
9124                         if (mddev->ro &&
9125                             ! (rdev->saved_raid_disk >= 0 &&
9126                                !test_bit(Bitmap_sync, &rdev->flags)))
9127                                 continue;
9128
9129                         rdev->recovery_offset = 0;
9130                 }
9131                 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
9132                         /* failure here is OK */
9133                         sysfs_link_rdev(mddev, rdev);
9134                         if (!test_bit(Journal, &rdev->flags))
9135                                 spares++;
9136                         md_new_event(mddev);
9137                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9138                 }
9139         }
9140 no_add:
9141         if (removed)
9142                 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9143         return spares;
9144 }
9145
9146 static void md_start_sync(struct work_struct *ws)
9147 {
9148         struct mddev *mddev = container_of(ws, struct mddev, del_work);
9149
9150         mddev->sync_thread = md_register_thread(md_do_sync,
9151                                                 mddev,
9152                                                 "resync");
9153         if (!mddev->sync_thread) {
9154                 pr_warn("%s: could not start resync thread...\n",
9155                         mdname(mddev));
9156                 /* leave the spares where they are, it shouldn't hurt */
9157                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9158                 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9159                 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9160                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9161                 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9162                 wake_up(&resync_wait);
9163                 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9164                                        &mddev->recovery))
9165                         if (mddev->sysfs_action)
9166                                 sysfs_notify_dirent_safe(mddev->sysfs_action);
9167         } else
9168                 md_wakeup_thread(mddev->sync_thread);
9169         sysfs_notify_dirent_safe(mddev->sysfs_action);
9170         md_new_event(mddev);
9171 }
9172
9173 /*
9174  * This routine is regularly called by all per-raid-array threads to
9175  * deal with generic issues like resync and super-block update.
9176  * Raid personalities that don't have a thread (linear/raid0) do not
9177  * need this as they never do any recovery or update the superblock.
9178  *
9179  * It does not do any resync itself, but rather "forks" off other threads
9180  * to do that as needed.
9181  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
9182  * "->recovery" and create a thread at ->sync_thread.
9183  * When the thread finishes it sets MD_RECOVERY_DONE
9184  * and wakes up this thread, which will reap the thread and finish up.
9185  * This thread also removes any faulty devices (with nr_pending == 0).
9186  *
9187  * The overall approach is:
9188  *  1/ if the superblock needs updating, update it.
9189  *  2/ If a recovery thread is running, don't do anything else.
9190  *  3/ If recovery has finished, clean up, possibly marking spares active.
9191  *  4/ If there are any faulty devices, remove them.
9192  *  5/ If array is degraded, try to add spare devices
9193  *  6/ If array has spares or is not in-sync, start a resync thread.
9194  */
9195 void md_check_recovery(struct mddev *mddev)
9196 {
9197         if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9198                 /* Write superblock - thread that called mddev_suspend()
9199                  * holds reconfig_mutex for us.
9200                  */
9201                 set_bit(MD_UPDATING_SB, &mddev->flags);
9202                 smp_mb__after_atomic();
9203                 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9204                         md_update_sb(mddev, 0);
9205                 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9206                 wake_up(&mddev->sb_wait);
9207         }
9208
9209         if (mddev->suspended)
9210                 return;
9211
9212         if (mddev->bitmap)
9213                 md_bitmap_daemon_work(mddev);
9214
9215         if (signal_pending(current)) {
9216                 if (mddev->pers->sync_request && !mddev->external) {
9217                         pr_debug("md: %s in immediate safe mode\n",
9218                                  mdname(mddev));
9219                         mddev->safemode = 2;
9220                 }
9221                 flush_signals(current);
9222         }
9223
9224         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9225                 return;
9226         if ( ! (
9227                 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
9228                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9229                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
9230                 (mddev->external == 0 && mddev->safemode == 1) ||
9231                 (mddev->safemode == 2
9232                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
9233                 ))
9234                 return;
9235
9236         if (mddev_trylock(mddev)) {
9237                 int spares = 0;
9238                 bool try_set_sync = mddev->safemode != 0;
9239
9240                 if (!mddev->external && mddev->safemode == 1)
9241                         mddev->safemode = 0;
9242
9243                 if (mddev->ro) {
9244                         struct md_rdev *rdev;
9245                         if (!mddev->external && mddev->in_sync)
9246                                 /* 'Blocked' flag not needed as failed devices
9247                                  * will be recorded if array switched to read/write.
9248                                  * Leaving it set will prevent the device
9249                                  * from being removed.
9250                                  */
9251                                 rdev_for_each(rdev, mddev)
9252                                         clear_bit(Blocked, &rdev->flags);
9253                         /* On a read-only array we can:
9254                          * - remove failed devices
9255                          * - add already-in_sync devices if the array itself
9256                          *   is in-sync.
9257                          * As we only add devices that are already in-sync,
9258                          * we can activate the spares immediately.
9259                          */
9260                         remove_and_add_spares(mddev, NULL);
9261                         /* There is no thread, but we need to call
9262                          * ->spare_active and clear saved_raid_disk
9263                          */
9264                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9265                         md_reap_sync_thread(mddev);
9266                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9267                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9268                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
9269                         goto unlock;
9270                 }
9271
9272                 if (mddev_is_clustered(mddev)) {
9273                         struct md_rdev *rdev, *tmp;
9274                         /* kick the device if another node issued a
9275                          * remove disk.
9276                          */
9277                         rdev_for_each_safe(rdev, tmp, mddev) {
9278                                 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
9279                                                 rdev->raid_disk < 0)
9280                                         md_kick_rdev_from_array(rdev);
9281                         }
9282                 }
9283
9284                 if (try_set_sync && !mddev->external && !mddev->in_sync) {
9285                         spin_lock(&mddev->lock);
9286                         set_in_sync(mddev);
9287                         spin_unlock(&mddev->lock);
9288                 }
9289
9290                 if (mddev->sb_flags)
9291                         md_update_sb(mddev, 0);
9292
9293                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
9294                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
9295                         /* resync/recovery still happening */
9296                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9297                         goto unlock;
9298                 }
9299                 if (mddev->sync_thread) {
9300                         md_reap_sync_thread(mddev);
9301                         goto unlock;
9302                 }
9303                 /* Set RUNNING before clearing NEEDED to avoid
9304                  * any transients in the value of "sync_action".
9305                  */
9306                 mddev->curr_resync_completed = 0;
9307                 spin_lock(&mddev->lock);
9308                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9309                 spin_unlock(&mddev->lock);
9310                 /* Clear some bits that don't mean anything, but
9311                  * might be left set
9312                  */
9313                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
9314                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
9315
9316                 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9317                     test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
9318                         goto not_running;
9319                 /* no recovery is running.
9320                  * remove any failed drives, then
9321                  * add spares if possible.
9322                  * Spares are also removed and re-added, to allow
9323                  * the personality to fail the re-add.
9324                  */
9325
9326                 if (mddev->reshape_position != MaxSector) {
9327                         if (mddev->pers->check_reshape == NULL ||
9328                             mddev->pers->check_reshape(mddev) != 0)
9329                                 /* Cannot proceed */
9330                                 goto not_running;
9331                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9332                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9333                 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
9334                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9335                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9336                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9337                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9338                 } else if (mddev->recovery_cp < MaxSector) {
9339                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9340                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9341                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
9342                         /* nothing to be done ... */
9343                         goto not_running;
9344
9345                 if (mddev->pers->sync_request) {
9346                         if (spares) {
9347                                 /* We are adding a device or devices to an array
9348                                  * which has the bitmap stored on all devices.
9349                                  * So make sure all bitmap pages get written
9350                                  */
9351                                 md_bitmap_write_all(mddev->bitmap);
9352                         }
9353                         INIT_WORK(&mddev->del_work, md_start_sync);
9354                         queue_work(md_misc_wq, &mddev->del_work);
9355                         goto unlock;
9356                 }
9357         not_running:
9358                 if (!mddev->sync_thread) {
9359                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9360                         wake_up(&resync_wait);
9361                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9362                                                &mddev->recovery))
9363                                 if (mddev->sysfs_action)
9364                                         sysfs_notify_dirent_safe(mddev->sysfs_action);
9365                 }
9366         unlock:
9367                 wake_up(&mddev->sb_wait);
9368                 mddev_unlock(mddev);
9369         }
9370 }
9371 EXPORT_SYMBOL(md_check_recovery);
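
/*
 * Illustrative sketch (assumed caller, not code from this file): a
 * personality's daemon thread typically invokes md_check_recovery() once per
 * wakeup so the generic layer can write superblocks and start or reap resync
 * threads, roughly:
 *
 *	static void my_daemon(struct md_thread *thread)
 *	{
 *		struct mddev *mddev = thread->mddev;
 *
 *		md_check_recovery(mddev);
 *		// ... then handle the personality's own deferred work ...
 *	}
 */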
9372
9373 void md_reap_sync_thread(struct mddev *mddev)
9374 {
9375         struct md_rdev *rdev;
9376         sector_t old_dev_sectors = mddev->dev_sectors;
9377         bool is_reshaped = false;
9378
9379         /* resync has finished, collect result */
9380         md_unregister_thread(&mddev->sync_thread);
9381         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9382             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
9383             mddev->degraded != mddev->raid_disks) {
9384                 /* success...*/
9385                 /* activate any spares */
9386                 if (mddev->pers->spare_active(mddev)) {
9387                         sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9388                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9389                 }
9390         }
9391         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9392             mddev->pers->finish_reshape) {
9393                 mddev->pers->finish_reshape(mddev);
9394                 if (mddev_is_clustered(mddev))
9395                         is_reshaped = true;
9396         }
9397
9398         /* If the array is no longer degraded, then any saved_raid_disk
9399          * information must be scrapped.
9400          */
9401         if (!mddev->degraded)
9402                 rdev_for_each(rdev, mddev)
9403                         rdev->saved_raid_disk = -1;
9404
9405         md_update_sb(mddev, 1);
9406         /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
9407          * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
9408          * clustered raid */
9409         if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
9410                 md_cluster_ops->resync_finish(mddev);
9411         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9412         clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
9413         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9414         clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9415         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9416         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9417         /*
9418          * We call md_cluster_ops->update_size here because sync_size could
9419          * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
9420          * so it is time to update size across cluster.
9421          * so it is time to update the size across the cluster.
9422         if (mddev_is_clustered(mddev) && is_reshaped
9423                                       && !test_bit(MD_CLOSING, &mddev->flags))
9424                 md_cluster_ops->update_size(mddev, old_dev_sectors);
9425         wake_up(&resync_wait);
9426         /* flag recovery needed just to double check */
9427         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9428         sysfs_notify_dirent_safe(mddev->sysfs_completed);
9429         sysfs_notify_dirent_safe(mddev->sysfs_action);
9430         md_new_event(mddev);
9431         if (mddev->event_work.func)
9432                 queue_work(md_misc_wq, &mddev->event_work);
9433 }
9434 EXPORT_SYMBOL(md_reap_sync_thread);
9435
9436 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
9437 {
9438         sysfs_notify_dirent_safe(rdev->sysfs_state);
9439         wait_event_timeout(rdev->blocked_wait,
9440                            !test_bit(Blocked, &rdev->flags) &&
9441                            !test_bit(BlockedBadBlocks, &rdev->flags),
9442                            msecs_to_jiffies(5000));
9443         rdev_dec_pending(rdev, mddev);
9444 }
9445 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
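
/*
 * Illustrative usage (an assumption, not code from this file): the caller is
 * expected to hold a reference on the rdev (nr_pending elevated), which is
 * dropped above via rdev_dec_pending() once Blocked clears or the 5 second
 * timeout expires, so a retry loop would re-take it before waiting again:
 *
 *	while (test_bit(Blocked, &rdev->flags)) {
 *		atomic_inc(&rdev->nr_pending);
 *		md_wait_for_blocked_rdev(rdev, mddev);	// drops the reference
 *	}
 */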
9446
9447 void md_finish_reshape(struct mddev *mddev)
9448 {
9449         /* called by the personality module when a reshape completes. */
9450         struct md_rdev *rdev;
9451
9452         rdev_for_each(rdev, mddev) {
9453                 if (rdev->data_offset > rdev->new_data_offset)
9454                         rdev->sectors += rdev->data_offset - rdev->new_data_offset;
9455                 else
9456                         rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
9457                 rdev->data_offset = rdev->new_data_offset;
9458         }
9459 }
9460 EXPORT_SYMBOL(md_finish_reshape);
9461
9462 /* Bad block management */
9463
9464 /* Returns 1 on success, 0 on failure */
9465 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9466                        int is_new)
9467 {
9468         struct mddev *mddev = rdev->mddev;
9469         int rv;
9470         if (is_new)
9471                 s += rdev->new_data_offset;
9472         else
9473                 s += rdev->data_offset;
9474         rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
9475         if (rv == 0) {
9476                 /* Make sure they get written out promptly */
9477                 if (test_bit(ExternalBbl, &rdev->flags))
9478                         sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
9479                 sysfs_notify_dirent_safe(rdev->sysfs_state);
9480                 set_mask_bits(&mddev->sb_flags, 0,
9481                               BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
9482                 md_wakeup_thread(rdev->mddev->thread);
9483                 return 1;
9484         } else
9485                 return 0;
9486 }
9487 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
9488
9489 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9490                          int is_new)
9491 {
9492         int rv;
9493         if (is_new)
9494                 s += rdev->new_data_offset;
9495         else
9496                 s += rdev->data_offset;
9497         rv = badblocks_clear(&rdev->badblocks, s, sectors);
9498         if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
9499                 sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
9500         return rv;
9501 }
9502 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
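
/*
 * Illustrative sketch of how a personality might use the two helpers above
 * (the surrounding error handling is an assumption for the example):
 *
 *	// a write of 'sectors' sectors at 'sector' on this rdev failed
 *	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
 *		md_error(mddev, rdev);	// could not record it, fail the device
 *
 *	// later, once the region has been successfully rewritten
 *	rdev_clear_badblocks(rdev, sector, sectors, 0);
 */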
9503
9504 static int md_notify_reboot(struct notifier_block *this,
9505                             unsigned long code, void *x)
9506 {
9507         struct list_head *tmp;
9508         struct mddev *mddev;
9509         int need_delay = 0;
9510
9511         for_each_mddev(mddev, tmp) {
9512                 if (mddev_trylock(mddev)) {
9513                         if (mddev->pers)
9514                                 __md_stop_writes(mddev);
9515                         if (mddev->persistent)
9516                                 mddev->safemode = 2;
9517                         mddev_unlock(mddev);
9518                 }
9519                 need_delay = 1;
9520         }
9521         /*
9522          * certain more exotic SCSI devices are known to be
9523          * volatile wrt too early system reboots. While the
9524          * right place to handle this issue is the given
9525          * driver, we do want to have a safe RAID driver ...
9526          */
9527         if (need_delay)
9528                 mdelay(1000*1);
9529
9530         return NOTIFY_DONE;
9531 }
9532
9533 static struct notifier_block md_notifier = {
9534         .notifier_call  = md_notify_reboot,
9535         .next           = NULL,
9536         .priority       = INT_MAX, /* before any real devices */
9537 };
9538
9539 static void md_geninit(void)
9540 {
9541         pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
9542
9543         proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
9544 }
9545
9546 static int __init md_init(void)
9547 {
9548         int ret = -ENOMEM;
9549
9550         md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
9551         if (!md_wq)
9552                 goto err_wq;
9553
9554         md_misc_wq = alloc_workqueue("md_misc", 0, 0);
9555         if (!md_misc_wq)
9556                 goto err_misc_wq;
9557
9558         md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
9559         if (!md_rdev_misc_wq)
9560                 goto err_rdev_misc_wq;
9561
9562         if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
9563                 goto err_md;
9564
9565         if ((ret = register_blkdev(0, "mdp")) < 0)
9566                 goto err_mdp;
9567         mdp_major = ret;
9568
9569         blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
9570                             md_probe, NULL, NULL);
9571         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
9572                             md_probe, NULL, NULL);
9573
9574         register_reboot_notifier(&md_notifier);
9575         raid_table_header = register_sysctl_table(raid_root_table);
9576
9577         md_geninit();
9578         return 0;
9579
9580 err_mdp:
9581         unregister_blkdev(MD_MAJOR, "md");
9582 err_md:
9583         destroy_workqueue(md_rdev_misc_wq);
9584 err_rdev_misc_wq:
9585         destroy_workqueue(md_misc_wq);
9586 err_misc_wq:
9587         destroy_workqueue(md_wq);
9588 err_wq:
9589         return ret;
9590 }
9591
9592 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
9593 {
9594         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
9595         struct md_rdev *rdev2, *tmp;
9596         int role, ret;
9597         char b[BDEVNAME_SIZE];
9598
9599         /*
9600          * If the size was changed on another node then we need to
9601          * resize here as well.
9602          */
9603         if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
9604                 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
9605                 if (ret)
9606                         pr_info("md-cluster: resize failed\n");
9607                 else
9608                         md_bitmap_update_sb(mddev->bitmap);
9609         }
9610
9611         /* Check for change of roles in the active devices */
9612         rdev_for_each_safe(rdev2, tmp, mddev) {
9613                 if (test_bit(Faulty, &rdev2->flags))
9614                         continue;
9615
9616                 /* Check if the roles changed */
9617                 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
9618
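                /*
                 * The magic role values tested below follow the v1.x
                 * superblock conventions (MD_DISK_ROLE_* in md_p.h):
                 * 0xffff means spare, 0xfffe means faulty and 0xfffd means
                 * journal.
                 */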
9619                 if (test_bit(Candidate, &rdev2->flags)) {
9620                         if (role == 0xfffe) {
9621                                 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
9622                                 md_kick_rdev_from_array(rdev2);
9623                                 continue;
9624                         }
9625                         else
9626                                 clear_bit(Candidate, &rdev2->flags);
9627                 }
9628
9629                 if (role != rdev2->raid_disk) {
9630                         /*
9631                          * The device got activated (by another node), unless a reshape is happening.
9632                          */
9633                         if (rdev2->raid_disk == -1 && role != 0xffff &&
9634                             !(le32_to_cpu(sb->feature_map) &
9635                               MD_FEATURE_RESHAPE_ACTIVE)) {
9636                                 rdev2->saved_raid_disk = role;
9637                                 ret = remove_and_add_spares(mddev, rdev2);
9638                                 pr_info("Activated spare: %s\n",
9639                                         bdevname(rdev2->bdev,b));
9640                                 /* wake up mddev->thread here, so the array can
9641                                  * perform a resync with the newly activated disk */
9642                                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9643                                 md_wakeup_thread(mddev->thread);
9644                         }
9645                         /* device faulty
9646                          * We just want to do the minimum to mark the disk
9647                          * as faulty. The recovery is performed by the
9648                          * node that initiated the error.
9649                          */
9650                         if ((role == 0xfffe) || (role == 0xfffd)) {
9651                                 md_error(mddev, rdev2);
9652                                 clear_bit(Blocked, &rdev2->flags);
9653                         }
9654                 }
9655         }
9656
9657         if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
9658                 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
9659                 if (ret)
9660                         pr_warn("md: updating array disks failed. %d\n", ret);
9661         }
9662
9663         /*
9664          * mddev->delta_disks has already been updated by update_raid_disks(),
9665          * so it is time to check for a reshape.
9666          */
9667         if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9668             (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9669                 /*
9670                  * a reshape is happening on the remote node, so we need to
9671                  * update reshape_position and call start_reshape.
9672                  */
9673                 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
9674                 if (mddev->pers->update_reshape_pos)
9675                         mddev->pers->update_reshape_pos(mddev);
9676                 if (mddev->pers->start_reshape)
9677                         mddev->pers->start_reshape(mddev);
9678         } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9679                    mddev->reshape_position != MaxSector &&
9680                    !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9681                 /* the reshape has just finished on another node. */
9682                 mddev->reshape_position = MaxSector;
9683                 if (mddev->pers->update_reshape_pos)
9684                         mddev->pers->update_reshape_pos(mddev);
9685         }
9686
9687         /* Finally, bring the event count up to date */
9688         mddev->events = le64_to_cpu(sb->events);
9689 }
9690
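/*
 * read_rdev() - re-read the superblock of one member device from disk.
 * A fresh sb page is allocated and loaded; if that fails, the previous
 * page is restored and the error is returned.  On success the on-disk
 * recovery_offset is picked up and, if recovery has completed on the
 * other node, ->spare_active() is called so the device becomes In_sync
 * and mddev->degraded is updated.
 */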
9691 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
9692 {
9693         int err;
9694         struct page *swapout = rdev->sb_page;
9695         struct mdp_superblock_1 *sb;
9696
9697         /* Stash the rdev's current sb page in the "swapout" temporary so
9698          * it can be restored if reloading the superblock fails below
9699          */
9700         rdev->sb_page = NULL;
9701         err = alloc_disk_sb(rdev);
9702         if (err == 0) {
9703                 ClearPageUptodate(rdev->sb_page);
9704                 rdev->sb_loaded = 0;
9705                 err = super_types[mddev->major_version].
9706                         load_super(rdev, NULL, mddev->minor_version);
9707         }
9708         if (err < 0) {
9709                 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
9710                                 __func__, __LINE__, rdev->desc_nr, err);
9711                 if (rdev->sb_page)
9712                         put_page(rdev->sb_page);
9713                 rdev->sb_page = swapout;
9714                 rdev->sb_loaded = 1;
9715                 return err;
9716         }
9717
9718         sb = page_address(rdev->sb_page);
9719         /* Only pick up recovery_offset when the superblock marks it as
9720          * valid, i.e. MD_FEATURE_RECOVERY_OFFSET is set
9721          */
9722
9723         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
9724                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
9725
9726         /* The other node finished recovery; call spare_active to mark
9727          * the device In_sync and update mddev->degraded
9728          */
9729         if (rdev->recovery_offset == MaxSector &&
9730             !test_bit(In_sync, &rdev->flags) &&
9731             mddev->pers->spare_active(mddev))
9732                 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9733
9734         put_page(swapout);
9735         return 0;
9736 }
9737
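/*
 * md_reload_sb() - md-cluster hook: another node changed the metadata of
 * device @nr, so re-read that superblock and apply the changes locally via
 * check_sb_changes().  Afterwards every non-faulty member is re-read so
 * recovery_offset stays current on this node.
 */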
9738 void md_reload_sb(struct mddev *mddev, int nr)
9739 {
9740         struct md_rdev *rdev = NULL, *iter;
9741         int err;
9742
9743         /* Find the rdev */
9744         rdev_for_each_rcu(iter, mddev) {
9745                 if (iter->desc_nr == nr) {
9746                         rdev = iter;
9747                         break;
9748                 }
9749         }
9750
9751         if (!rdev) {
9752                 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
9753                 return;
9754         }
9755
9756         err = read_rdev(mddev, rdev);
9757         if (err < 0)
9758                 return;
9759
9760         check_sb_changes(mddev, rdev);
9761
9762         /* Re-read all rdevs to update recovery_offset */
9763         rdev_for_each_rcu(rdev, mddev) {
9764                 if (!test_bit(Faulty, &rdev->flags))
9765                         read_rdev(mddev, rdev);
9766         }
9767 }
9768 EXPORT_SYMBOL(md_reload_sb);
9769
9770 #ifndef MODULE
9771
9772 /*
9773  * Searches all registered partitions for autorun RAID arrays
9774  * at boot time.
9775  */
9776
9777 static DEFINE_MUTEX(detected_devices_mutex);
9778 static LIST_HEAD(all_detected_devices);
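/*
 * One partition queued for boot-time autodetection; entries stay on
 * all_detected_devices until md_autostart_arrays() consumes them.
 */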
9779 struct detected_devices_node {
9780         struct list_head list;
9781         dev_t dev;
9782 };
9783
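/*
 * md_autodetect_dev() - queue a partition for boot-time RAID autodetection.
 * Called by the partition code for devices marked for RAID autostart; the
 * dev_t is remembered on all_detected_devices until md_autostart_arrays()
 * processes the list.
 */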
9784 void md_autodetect_dev(dev_t dev)
9785 {
9786         struct detected_devices_node *node_detected_dev;
9787
9788         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
9789         if (node_detected_dev) {
9790                 node_detected_dev->dev = dev;
9791                 mutex_lock(&detected_devices_mutex);
9792                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
9793                 mutex_unlock(&detected_devices_mutex);
9794         }
9795 }
9796
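/*
 * md_autostart_arrays() - try to assemble arrays from the devices queued by
 * md_autodetect_dev().  Each entry is imported with md_import_device() (the
 * mutex is dropped around that call, which may sleep) and, if usable, added
 * to pending_raid_disks before autorun_devices() assembles the arrays.
 * @part is passed through to autorun_devices().
 */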
9797 void md_autostart_arrays(int part)
9798 {
9799         struct md_rdev *rdev;
9800         struct detected_devices_node *node_detected_dev;
9801         dev_t dev;
9802         int i_scanned, i_passed;
9803
9804         i_scanned = 0;
9805         i_passed = 0;
9806
9807         pr_info("md: Autodetecting RAID arrays.\n");
9808
9809         mutex_lock(&detected_devices_mutex);
9810         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
9811                 i_scanned++;
9812                 node_detected_dev = list_entry(all_detected_devices.next,
9813                                         struct detected_devices_node, list);
9814                 list_del(&node_detected_dev->list);
9815                 dev = node_detected_dev->dev;
9816                 kfree(node_detected_dev);
9817                 mutex_unlock(&detected_devices_mutex);
9818                 rdev = md_import_device(dev, 0, 90);
9819                 mutex_lock(&detected_devices_mutex);
9820                 if (IS_ERR(rdev))
9821                         continue;
9822
9823                 if (test_bit(Faulty, &rdev->flags))
9824                         continue;
9825
9826                 set_bit(AutoDetected, &rdev->flags);
9827                 list_add(&rdev->same_set, &pending_raid_disks);
9828                 i_passed++;
9829         }
9830         mutex_unlock(&detected_devices_mutex);
9831
9832         pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
9833
9834         autorun_devices(part);
9835 }
9836
9837 #endif /* !MODULE */
9838
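/*
 * md_exit() - module unload path: unregister the block regions, majors,
 * reboot notifier and sysctl table, wake anyone polling /proc/mdstat so the
 * module refcount can drop, remove the proc entry, export every remaining
 * array and finally destroy the workqueues.
 */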
9839 static __exit void md_exit(void)
9840 {
9841         struct mddev *mddev;
9842         struct list_head *tmp;
9843         int delay = 1;
9844
9845         blk_unregister_region(MKDEV(MD_MAJOR, 0), 512);
9846         blk_unregister_region(MKDEV(mdp_major, 0), 1U << MINORBITS);
9847
9848         unregister_blkdev(MD_MAJOR, "md");
9849         unregister_blkdev(mdp_major, "mdp");
9850         unregister_reboot_notifier(&md_notifier);
9851         unregister_sysctl_table(raid_table_header);
9852
9853         /* We cannot unload the module while some process is
9854          * waiting for us in select() or poll() - wake them up
9855          */
9856         md_unloading = 1;
9857         while (waitqueue_active(&md_event_waiters)) {
9858                 /* not safe to leave yet */
9859                 wake_up(&md_event_waiters);
9860                 msleep(delay);
9861                 delay += delay;
9862         }
9863         remove_proc_entry("mdstat", NULL);
9864
9865         for_each_mddev(mddev, tmp) {
9866                 export_array(mddev);
9867                 mddev->ctime = 0;
9868                 mddev->hold_active = 0;
9869                 /*
9870                  * for_each_mddev() will call mddev_put() at the end of each
9871                  * iteration.  As the mddev is now fully clear, this will
9872                  * schedule the mddev for destruction by a workqueue, and the
9873                  * destroy_workqueue() below will wait for that to complete.
9874                  */
9875         }
9876         destroy_workqueue(md_rdev_misc_wq);
9877         destroy_workqueue(md_misc_wq);
9878         destroy_workqueue(md_wq);
9879 }
9880
9881 subsys_initcall(md_init);
9882 module_exit(md_exit)
9883
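/*
 * Handlers for the "start_ro" module parameter: report and update the
 * start_readonly flag, which controls whether newly started arrays begin
 * in auto-read-only mode.
 */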
9884 static int get_ro(char *buffer, const struct kernel_param *kp)
9885 {
9886         return sprintf(buffer, "%d\n", start_readonly);
9887 }
9888 static int set_ro(const char *val, const struct kernel_param *kp)
9889 {
9890         return kstrtouint(val, 10, (unsigned int *)&start_readonly);
9891 }
9892
9893 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
9894 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
9895 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
9896 module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
9897
9898 MODULE_LICENSE("GPL");
9899 MODULE_DESCRIPTION("MD RAID framework");
9900 MODULE_ALIAS("md");
9901 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);