GNU Linux-libre 4.9.318-gnu1
[releases.git] / drivers / md / md.c
1 /*
2    md.c : Multiple Devices driver for Linux
3      Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35 #include <linux/kthread.h>
36 #include <linux/blkdev.h>
37 #include <linux/badblocks.h>
38 #include <linux/sysctl.h>
39 #include <linux/seq_file.h>
40 #include <linux/fs.h>
41 #include <linux/poll.h>
42 #include <linux/ctype.h>
43 #include <linux/string.h>
44 #include <linux/hdreg.h>
45 #include <linux/proc_fs.h>
46 #include <linux/random.h>
47 #include <linux/module.h>
48 #include <linux/reboot.h>
49 #include <linux/file.h>
50 #include <linux/compat.h>
51 #include <linux/delay.h>
52 #include <linux/raid/md_p.h>
53 #include <linux/raid/md_u.h>
54 #include <linux/slab.h>
55 #include "md.h"
56 #include "bitmap.h"
57 #include "md-cluster.h"
58
59 #ifndef MODULE
60 static void autostart_arrays(int part);
61 #endif
62
63 /* pers_list is a list of registered personalities protected
64  * by pers_lock.
65  * pers_lock also serves to protect accesses to
66  * mddev->thread when the mutex cannot be held.
67  */
68 static LIST_HEAD(pers_list);
69 static DEFINE_SPINLOCK(pers_lock);
70
71 struct md_cluster_operations *md_cluster_ops;
72 EXPORT_SYMBOL(md_cluster_ops);
73 struct module *md_cluster_mod;
74 EXPORT_SYMBOL(md_cluster_mod);
75
76 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
77 static struct workqueue_struct *md_wq;
78 static struct workqueue_struct *md_misc_wq;
79
80 static int remove_and_add_spares(struct mddev *mddev,
81                                  struct md_rdev *this);
82 static void mddev_detach(struct mddev *mddev);
83
84 /*
85  * Default number of read corrections we'll attempt on an rdev
86  * before ejecting it from the array. We divide the read error
87  * count by 2 for every hour elapsed between read errors.
88  */
89 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
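/*
 * Rough illustration of the decay rule above (the bookkeeping itself lives
 * in the personalities, e.g. raid1/raid10): a hypothetical rdev that has
 * accumulated 20 corrected read errors and whose next read error arrives
 * 2 hours later is treated as having 20 >> 2 = 5 prior errors, so it stays
 * in the array; only if the decayed count still exceeds the limit is the
 * device ejected.
 */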
90 /*
91  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
92  * is 1000 KB/sec, so the extra system load does not show up that much.
93  * Increase it if you want to have more _guaranteed_ speed. Note that
94  * the RAID driver will use the maximum available bandwidth if the IO
95  * subsystem is idle. There is also an 'absolute maximum' reconstruction
96  * speed limit - in case reconstruction slows down your system despite
97  * idle IO detection.
98  *
99  * You can change it via /proc/sys/dev/raid/speed_limit_{min,max}
100  * or via /sys/block/mdX/md/sync_speed_{min,max}.
101  */
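/*
 * Illustrative admin usage of the knobs described above (md0 and the
 * values are examples only, not recommendations):
 *
 *   echo 50000  > /proc/sys/dev/raid/speed_limit_min
 *   echo 100000 > /sys/block/md0/md/sync_speed_max
 *
 * A non-zero per-array sysfs value overrides the system-wide sysctl
 * default; see speed_min()/speed_max() below.
 */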
102
103 static int sysctl_speed_limit_min = 1000;
104 static int sysctl_speed_limit_max = 200000;
105 static inline int speed_min(struct mddev *mddev)
106 {
107         return mddev->sync_speed_min ?
108                 mddev->sync_speed_min : sysctl_speed_limit_min;
109 }
110
111 static inline int speed_max(struct mddev *mddev)
112 {
113         return mddev->sync_speed_max ?
114                 mddev->sync_speed_max : sysctl_speed_limit_max;
115 }
116
117 static struct ctl_table_header *raid_table_header;
118
119 static struct ctl_table raid_table[] = {
120         {
121                 .procname       = "speed_limit_min",
122                 .data           = &sysctl_speed_limit_min,
123                 .maxlen         = sizeof(int),
124                 .mode           = S_IRUGO|S_IWUSR,
125                 .proc_handler   = proc_dointvec,
126         },
127         {
128                 .procname       = "speed_limit_max",
129                 .data           = &sysctl_speed_limit_max,
130                 .maxlen         = sizeof(int),
131                 .mode           = S_IRUGO|S_IWUSR,
132                 .proc_handler   = proc_dointvec,
133         },
134         { }
135 };
136
137 static struct ctl_table raid_dir_table[] = {
138         {
139                 .procname       = "raid",
140                 .maxlen         = 0,
141                 .mode           = S_IRUGO|S_IXUGO,
142                 .child          = raid_table,
143         },
144         { }
145 };
146
147 static struct ctl_table raid_root_table[] = {
148         {
149                 .procname       = "dev",
150                 .maxlen         = 0,
151                 .mode           = 0555,
152                 .child          = raid_dir_table,
153         },
154         {  }
155 };
156
157 static const struct block_device_operations md_fops;
158
159 static int start_readonly;
160
161 /* bio_alloc_mddev / bio_clone_mddev
162  * like bio_alloc / bio_clone, but using the mddev's local bio set when available
163  */
164
165 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
166                             struct mddev *mddev)
167 {
168         struct bio *b;
169
170         if (!mddev || !mddev->bio_set)
171                 return bio_alloc(gfp_mask, nr_iovecs);
172
173         b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
174         if (!b)
175                 return NULL;
176         return b;
177 }
178 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
179
180 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
181                             struct mddev *mddev)
182 {
183         if (!mddev || !mddev->bio_set)
184                 return bio_clone(bio, gfp_mask);
185
186         return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
187 }
188 EXPORT_SYMBOL_GPL(bio_clone_mddev);
189
190 /*
191  * We have a system wide 'event count' that is incremented
192  * on any 'interesting' event, and readers of /proc/mdstat
193  * can use 'poll' or 'select' to find out when the event
194  * count increases.
195  *
196  * Events are:
197  *  start array, stop array, error, add device, remove device,
198  *  start build, activate spare
199  */
200 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
201 static atomic_t md_event_count;
202 void md_new_event(struct mddev *mddev)
203 {
204         atomic_inc(&md_event_count);
205         wake_up(&md_event_waiters);
206 }
207 EXPORT_SYMBOL_GPL(md_new_event);
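/*
 * Hedged user-space sketch of the /proc/mdstat polling described above
 * (roughly what monitoring tools such as mdadm do; not kernel code,
 * needs <poll.h>, <fcntl.h> and <unistd.h>):
 *
 *   int fd = open("/proc/mdstat", O_RDONLY);
 *   char buf[4096];
 *   struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *   read(fd, buf, sizeof(buf));          // consume the current contents
 *   while (poll(&pfd, 1, -1) > 0) {      // wakes when md_event_count changes
 *           lseek(fd, 0, SEEK_SET);
 *           read(fd, buf, sizeof(buf));  // re-read and act on the new state
 *   }
 */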
208
209 /*
210  * Enables iteration over all existing md arrays.
211  * all_mddevs_lock protects this list.
212  */
213 static LIST_HEAD(all_mddevs);
214 static DEFINE_SPINLOCK(all_mddevs_lock);
215
216 /*
217  * iterates through all used mddevs in the system.
218  * We take care to grab the all_mddevs_lock whenever navigating
219  * the list, and to always hold a refcount when unlocked.
220  * Any code which breaks out of this loop still owns a reference
221  * to the current mddev and must mddev_put() it.
222  */
223 #define for_each_mddev(_mddev,_tmp)                                     \
224                                                                         \
225         for (({ spin_lock(&all_mddevs_lock);                            \
226                 _tmp = all_mddevs.next;                                 \
227                 _mddev = NULL;});                                       \
228              ({ if (_tmp != &all_mddevs)                                \
229                         mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
230                 spin_unlock(&all_mddevs_lock);                          \
231                 if (_mddev) mddev_put(_mddev);                          \
232                 _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
233                 _tmp != &all_mddevs;});                                 \
234              ({ spin_lock(&all_mddevs_lock);                            \
235                 _tmp = _tmp->next;})                                    \
236                 )
237
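/*
 * Sketch of the intended for_each_mddev() usage ('target' is a
 * hypothetical dev_t): a caller that breaks out early still owns the
 * reference taken by the iterator and must drop it itself.
 *
 *   struct mddev *mddev, *found = NULL;
 *   struct list_head *tmp;
 *
 *   for_each_mddev(mddev, tmp) {
 *           if (mddev->unit == target) {
 *                   found = mddev;      // keep the iterator's reference
 *                   break;
 *           }
 *   }
 *   ...
 *   if (found)
 *           mddev_put(found);
 */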
238 /* Rather than calling directly into the personality make_request function,
239  * IO requests come here first so that we can check if the device is
240  * being suspended pending a reconfiguration.
241  * We hold a refcount over the call to ->make_request.  By the time that
242  * call has finished, the bio has been linked into some internal structure
243  * and so is visible to ->quiesce(), so we don't need the refcount any more.
244  */
245 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
246 {
247         const int rw = bio_data_dir(bio);
248         struct mddev *mddev = q->queuedata;
249         unsigned int sectors;
250         int cpu;
251
252         blk_queue_split(q, &bio, q->bio_split);
253
254         if (mddev == NULL || mddev->pers == NULL) {
255                 bio_io_error(bio);
256                 return BLK_QC_T_NONE;
257         }
258         if (mddev->ro == 1 && unlikely(rw == WRITE)) {
259                 if (bio_sectors(bio) != 0)
260                         bio->bi_error = -EROFS;
261                 bio_endio(bio);
262                 return BLK_QC_T_NONE;
263         }
264         smp_rmb(); /* Ensure implications of  'active' are visible */
265         rcu_read_lock();
266         if (mddev->suspended) {
267                 DEFINE_WAIT(__wait);
268                 for (;;) {
269                         prepare_to_wait(&mddev->sb_wait, &__wait,
270                                         TASK_UNINTERRUPTIBLE);
271                         if (!mddev->suspended)
272                                 break;
273                         rcu_read_unlock();
274                         schedule();
275                         rcu_read_lock();
276                 }
277                 finish_wait(&mddev->sb_wait, &__wait);
278         }
279         atomic_inc(&mddev->active_io);
280         rcu_read_unlock();
281
282         /*
283          * save the sectors now since our bio can
284          * go away inside make_request
285          */
286         sectors = bio_sectors(bio);
287         /* bio could be mergeable after passing to underlayer */
288         bio->bi_opf &= ~REQ_NOMERGE;
289         mddev->pers->make_request(mddev, bio);
290
291         cpu = part_stat_lock();
292         part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
293         part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
294         part_stat_unlock();
295
296         if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
297                 wake_up(&mddev->sb_wait);
298
299         return BLK_QC_T_NONE;
300 }
301
302 /* mddev_suspend makes sure no new requests are submitted
303  * to the device, and that any requests that have been submitted
304  * are completely handled.
305  * Once mddev_detach() is called and completes, the module will be
306  * completely unused.
307  */
308 void mddev_suspend(struct mddev *mddev)
309 {
310         WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
311         if (mddev->suspended++)
312                 return;
313         synchronize_rcu();
314         wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
315         mddev->pers->quiesce(mddev, 1);
316
317         del_timer_sync(&mddev->safemode_timer);
318 }
319 EXPORT_SYMBOL_GPL(mddev_suspend);
320
321 void mddev_resume(struct mddev *mddev)
322 {
323         if (--mddev->suspended)
324                 return;
325         wake_up(&mddev->sb_wait);
326         mddev->pers->quiesce(mddev, 0);
327
328         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
329         md_wakeup_thread(mddev->thread);
330         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
331 }
332 EXPORT_SYMBOL_GPL(mddev_resume);
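/*
 * Typical pairing of the two helpers above (a sketch of the pattern used
 * when a running array must be reconfigured):
 *
 *   mddev_suspend(mddev);
 *   ... modify personality data structures while no I/O is in flight ...
 *   mddev_resume(mddev);
 *
 * Calls may nest: only the first suspend quiesces the array and only the
 * matching final resume restarts I/O (see the ->suspended counter).
 */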
333
334 int mddev_congested(struct mddev *mddev, int bits)
335 {
336         struct md_personality *pers = mddev->pers;
337         int ret = 0;
338
339         rcu_read_lock();
340         if (mddev->suspended)
341                 ret = 1;
342         else if (pers && pers->congested)
343                 ret = pers->congested(mddev, bits);
344         rcu_read_unlock();
345         return ret;
346 }
347 EXPORT_SYMBOL_GPL(mddev_congested);
348 static int md_congested(void *data, int bits)
349 {
350         struct mddev *mddev = data;
351         return mddev_congested(mddev, bits);
352 }
353
354 /*
355  * Generic flush handling for md
356  */
357
358 static void md_end_flush(struct bio *bio)
359 {
360         struct md_rdev *rdev = bio->bi_private;
361         struct mddev *mddev = rdev->mddev;
362
363         rdev_dec_pending(rdev, mddev);
364
365         if (atomic_dec_and_test(&mddev->flush_pending)) {
366                 /* The pre-request flush has finished */
367                 queue_work(md_wq, &mddev->flush_work);
368         }
369         bio_put(bio);
370 }
371
372 static void md_submit_flush_data(struct work_struct *ws);
373
374 static void submit_flushes(struct work_struct *ws)
375 {
376         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
377         struct md_rdev *rdev;
378
379         INIT_WORK(&mddev->flush_work, md_submit_flush_data);
380         atomic_set(&mddev->flush_pending, 1);
381         rcu_read_lock();
382         rdev_for_each_rcu(rdev, mddev)
383                 if (rdev->raid_disk >= 0 &&
384                     !test_bit(Faulty, &rdev->flags)) {
385                         /* Take two references, one is dropped
386                          * when request finishes, one after
387                          * we re-take rcu_read_lock
388                          */
389                         struct bio *bi;
390                         atomic_inc(&rdev->nr_pending);
391                         atomic_inc(&rdev->nr_pending);
392                         rcu_read_unlock();
393                         bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
394                         bi->bi_end_io = md_end_flush;
395                         bi->bi_private = rdev;
396                         bi->bi_bdev = rdev->bdev;
397                         bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
398                         atomic_inc(&mddev->flush_pending);
399                         submit_bio(bi);
400                         rcu_read_lock();
401                         rdev_dec_pending(rdev, mddev);
402                 }
403         rcu_read_unlock();
404         if (atomic_dec_and_test(&mddev->flush_pending))
405                 queue_work(md_wq, &mddev->flush_work);
406 }
407
408 static void md_submit_flush_data(struct work_struct *ws)
409 {
410         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
411         struct bio *bio = mddev->flush_bio;
412
413         if (bio->bi_iter.bi_size == 0)
414                 /* an empty barrier - all done */
415                 bio_endio(bio);
416         else {
417                 bio->bi_opf &= ~REQ_PREFLUSH;
418                 mddev->pers->make_request(mddev, bio);
419         }
420
421         mddev->flush_bio = NULL;
422         wake_up(&mddev->sb_wait);
423 }
424
425 void md_flush_request(struct mddev *mddev, struct bio *bio)
426 {
427         spin_lock_irq(&mddev->lock);
428         wait_event_lock_irq(mddev->sb_wait,
429                             !mddev->flush_bio,
430                             mddev->lock);
431         mddev->flush_bio = bio;
432         spin_unlock_irq(&mddev->lock);
433
434         INIT_WORK(&mddev->flush_work, submit_flushes);
435         queue_work(md_wq, &mddev->flush_work);
436 }
437 EXPORT_SYMBOL(md_flush_request);
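/*
 * Sketch of how a personality hands flush bios to the code above from its
 * ->make_request method (several personalities follow this pattern;
 * 'example_make_request' is hypothetical):
 *
 *   static void example_make_request(struct mddev *mddev, struct bio *bio)
 *   {
 *           if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 *                   md_flush_request(mddev, bio);
 *                   return;
 *           }
 *           ... normal request handling ...
 *   }
 *
 * md_flush_request() first flushes every member device (submit_flushes)
 * and then resubmits the original bio with REQ_PREFLUSH cleared
 * (md_submit_flush_data).
 */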
438
439 void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
440 {
441         struct mddev *mddev = cb->data;
442         md_wakeup_thread(mddev->thread);
443         kfree(cb);
444 }
445 EXPORT_SYMBOL(md_unplug);
446
447 static inline struct mddev *mddev_get(struct mddev *mddev)
448 {
449         atomic_inc(&mddev->active);
450         return mddev;
451 }
452
453 static void mddev_delayed_delete(struct work_struct *ws);
454
455 static void mddev_put(struct mddev *mddev)
456 {
457         struct bio_set *bs = NULL;
458
459         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
460                 return;
461         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
462             mddev->ctime == 0 && !mddev->hold_active) {
463                 /* Array is not configured at all, and not held active,
464                  * so destroy it */
465                 list_del_init(&mddev->all_mddevs);
466                 bs = mddev->bio_set;
467                 mddev->bio_set = NULL;
468                 if (mddev->gendisk) {
469                         /* We did a probe so need to clean up.  Call
470                          * queue_work inside the spinlock so that
471                          * flush_workqueue() after mddev_find will
472                          * succeed in waiting for the work to be done.
473                          */
474                         INIT_WORK(&mddev->del_work, mddev_delayed_delete);
475                         queue_work(md_misc_wq, &mddev->del_work);
476                 } else
477                         kfree(mddev);
478         }
479         spin_unlock(&all_mddevs_lock);
480         if (bs)
481                 bioset_free(bs);
482 }
483
484 static void md_safemode_timeout(unsigned long data);
485
486 void mddev_init(struct mddev *mddev)
487 {
488         mutex_init(&mddev->open_mutex);
489         mutex_init(&mddev->reconfig_mutex);
490         mutex_init(&mddev->bitmap_info.mutex);
491         INIT_LIST_HEAD(&mddev->disks);
492         INIT_LIST_HEAD(&mddev->all_mddevs);
493         setup_timer(&mddev->safemode_timer, md_safemode_timeout,
494                     (unsigned long) mddev);
495         atomic_set(&mddev->active, 1);
496         atomic_set(&mddev->openers, 0);
497         atomic_set(&mddev->active_io, 0);
498         spin_lock_init(&mddev->lock);
499         atomic_set(&mddev->flush_pending, 0);
500         init_waitqueue_head(&mddev->sb_wait);
501         init_waitqueue_head(&mddev->recovery_wait);
502         mddev->reshape_position = MaxSector;
503         mddev->reshape_backwards = 0;
504         mddev->last_sync_action = "none";
505         mddev->resync_min = 0;
506         mddev->resync_max = MaxSector;
507         mddev->level = LEVEL_NONE;
508 }
509 EXPORT_SYMBOL_GPL(mddev_init);
510
511 static struct mddev *mddev_find_locked(dev_t unit)
512 {
513         struct mddev *mddev;
514
515         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
516                 if (mddev->unit == unit)
517                         return mddev;
518
519         return NULL;
520 }
521
522 static struct mddev *mddev_find(dev_t unit)
523 {
524         struct mddev *mddev, *new = NULL;
525
526         if (unit && MAJOR(unit) != MD_MAJOR)
527                 unit &= ~((1<<MdpMinorShift)-1);
528
529  retry:
530         spin_lock(&all_mddevs_lock);
531
532         if (unit) {
533                 mddev = mddev_find_locked(unit);
534                 if (mddev) {
535                         mddev_get(mddev);
536                         spin_unlock(&all_mddevs_lock);
537                         kfree(new);
538                         return mddev;
539                 }
540
541                 if (new) {
542                         list_add(&new->all_mddevs, &all_mddevs);
543                         spin_unlock(&all_mddevs_lock);
544                         new->hold_active = UNTIL_IOCTL;
545                         return new;
546                 }
547         } else if (new) {
548                 /* find an unused unit number */
549                 static int next_minor = 512;
550                 int start = next_minor;
551                 int is_free = 0;
552                 int dev = 0;
553                 while (!is_free) {
554                         dev = MKDEV(MD_MAJOR, next_minor);
555                         next_minor++;
556                         if (next_minor > MINORMASK)
557                                 next_minor = 0;
558                         if (next_minor == start) {
559                                 /* Oh dear, all in use. */
560                                 spin_unlock(&all_mddevs_lock);
561                                 kfree(new);
562                                 return NULL;
563                         }
564
565                         is_free = !mddev_find_locked(dev);
566                 }
567                 new->unit = dev;
568                 new->md_minor = MINOR(dev);
569                 new->hold_active = UNTIL_STOP;
570                 list_add(&new->all_mddevs, &all_mddevs);
571                 spin_unlock(&all_mddevs_lock);
572                 return new;
573         }
574         spin_unlock(&all_mddevs_lock);
575
576         new = kzalloc(sizeof(*new), GFP_KERNEL);
577         if (!new)
578                 return NULL;
579
580         new->unit = unit;
581         if (MAJOR(unit) == MD_MAJOR)
582                 new->md_minor = MINOR(unit);
583         else
584                 new->md_minor = MINOR(unit) >> MdpMinorShift;
585
586         mddev_init(new);
587
588         goto retry;
589 }
590
591 static struct attribute_group md_redundancy_group;
592
593 void mddev_unlock(struct mddev *mddev)
594 {
595         if (mddev->to_remove) {
596                 /* These cannot be removed under reconfig_mutex as
597                  * an access to the files will try to take reconfig_mutex
598                  * while holding the file unremovable, which leads to
599                  * a deadlock.
600                  * So set sysfs_active while the removal is happening,
601                  * and anything else which might set ->to_remove or may
602                  * otherwise change the sysfs namespace will fail with
603                  * -EBUSY if sysfs_active is still set.
604                  * We set sysfs_active under reconfig_mutex and elsewhere
605                  * test it under the same mutex to ensure its correct value
606                  * is seen.
607                  */
608                 struct attribute_group *to_remove = mddev->to_remove;
609                 mddev->to_remove = NULL;
610                 mddev->sysfs_active = 1;
611                 mutex_unlock(&mddev->reconfig_mutex);
612
613                 if (mddev->kobj.sd) {
614                         if (to_remove != &md_redundancy_group)
615                                 sysfs_remove_group(&mddev->kobj, to_remove);
616                         if (mddev->pers == NULL ||
617                             mddev->pers->sync_request == NULL) {
618                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
619                                 if (mddev->sysfs_action)
620                                         sysfs_put(mddev->sysfs_action);
621                                 mddev->sysfs_action = NULL;
622                         }
623                 }
624                 mddev->sysfs_active = 0;
625         } else
626                 mutex_unlock(&mddev->reconfig_mutex);
627
628         /* As we've dropped the mutex we need a spinlock to
629          * make sure the thread doesn't disappear
630          */
631         spin_lock(&pers_lock);
632         md_wakeup_thread(mddev->thread);
633         spin_unlock(&pers_lock);
634 }
635 EXPORT_SYMBOL_GPL(mddev_unlock);
636
637 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
638 {
639         struct md_rdev *rdev;
640
641         rdev_for_each_rcu(rdev, mddev)
642                 if (rdev->desc_nr == nr)
643                         return rdev;
644
645         return NULL;
646 }
647 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
648
649 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
650 {
651         struct md_rdev *rdev;
652
653         rdev_for_each(rdev, mddev)
654                 if (rdev->bdev->bd_dev == dev)
655                         return rdev;
656
657         return NULL;
658 }
659
660 static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
661 {
662         struct md_rdev *rdev;
663
664         rdev_for_each_rcu(rdev, mddev)
665                 if (rdev->bdev->bd_dev == dev)
666                         return rdev;
667
668         return NULL;
669 }
670
671 static struct md_personality *find_pers(int level, char *clevel)
672 {
673         struct md_personality *pers;
674         list_for_each_entry(pers, &pers_list, list) {
675                 if (level != LEVEL_NONE && pers->level == level)
676                         return pers;
677                 if (strcmp(pers->name, clevel)==0)
678                         return pers;
679         }
680         return NULL;
681 }
682
683 /* return the offset of the superblock in 512-byte sectors */
684 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
685 {
686         sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
687         return MD_NEW_SIZE_SECTORS(num_sectors);
688 }
689
690 static int alloc_disk_sb(struct md_rdev *rdev)
691 {
692         rdev->sb_page = alloc_page(GFP_KERNEL);
693         if (!rdev->sb_page) {
694                 printk(KERN_ALERT "md: out of memory.\n");
695                 return -ENOMEM;
696         }
697
698         return 0;
699 }
700
701 void md_rdev_clear(struct md_rdev *rdev)
702 {
703         if (rdev->sb_page) {
704                 put_page(rdev->sb_page);
705                 rdev->sb_loaded = 0;
706                 rdev->sb_page = NULL;
707                 rdev->sb_start = 0;
708                 rdev->sectors = 0;
709         }
710         if (rdev->bb_page) {
711                 put_page(rdev->bb_page);
712                 rdev->bb_page = NULL;
713         }
714         badblocks_exit(&rdev->badblocks);
715 }
716 EXPORT_SYMBOL_GPL(md_rdev_clear);
717
718 static void super_written(struct bio *bio)
719 {
720         struct md_rdev *rdev = bio->bi_private;
721         struct mddev *mddev = rdev->mddev;
722
723         if (bio->bi_error) {
724                 printk("md: super_written gets error=%d\n", bio->bi_error);
725                 md_error(mddev, rdev);
726         }
727
728         if (atomic_dec_and_test(&mddev->pending_writes))
729                 wake_up(&mddev->sb_wait);
730         rdev_dec_pending(rdev, mddev);
731         bio_put(bio);
732 }
733
734 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
735                    sector_t sector, int size, struct page *page)
736 {
737         /* write first size bytes of page to sector of rdev
738          * Increment mddev->pending_writes before returning
739          * and decrement it on completion, waking up sb_wait
740          * if zero is reached.
741          * If an error occurred, call md_error
742          */
743         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
744
745         atomic_inc(&rdev->nr_pending);
746
747         bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
748         bio->bi_iter.bi_sector = sector;
749         bio_add_page(bio, page, size, 0);
750         bio->bi_private = rdev;
751         bio->bi_end_io = super_written;
752         bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
753
754         atomic_inc(&mddev->pending_writes);
755         submit_bio(bio);
756 }
757
758 void md_super_wait(struct mddev *mddev)
759 {
760         /* wait for all superblock writes that were scheduled to complete */
761         wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
762 }
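/*
 * The usual pattern built from the two helpers above (a sketch of what
 * the superblock update path boils down to): queue one write per member
 * device, then wait for all of them to complete.
 *
 *   rdev_for_each(rdev, mddev)
 *           md_super_write(mddev, rdev, rdev->sb_start,
 *                          rdev->sb_size, rdev->sb_page);
 *   md_super_wait(mddev);
 */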
763
764 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
765                  struct page *page, int op, int op_flags, bool metadata_op)
766 {
767         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
768         int ret;
769
770         bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
771                 rdev->meta_bdev : rdev->bdev;
772         bio_set_op_attrs(bio, op, op_flags);
773         if (metadata_op)
774                 bio->bi_iter.bi_sector = sector + rdev->sb_start;
775         else if (rdev->mddev->reshape_position != MaxSector &&
776                  (rdev->mddev->reshape_backwards ==
777                   (sector >= rdev->mddev->reshape_position)))
778                 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
779         else
780                 bio->bi_iter.bi_sector = sector + rdev->data_offset;
781         bio_add_page(bio, page, size, 0);
782
783         submit_bio_wait(bio);
784
785         ret = !bio->bi_error;
786         bio_put(bio);
787         return ret;
788 }
789 EXPORT_SYMBOL_GPL(sync_page_io);
790
791 static int read_disk_sb(struct md_rdev *rdev, int size)
792 {
793         char b[BDEVNAME_SIZE];
794
795         if (rdev->sb_loaded)
796                 return 0;
797
798         if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
799                 goto fail;
800         rdev->sb_loaded = 1;
801         return 0;
802
803 fail:
804         printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
805                 bdevname(rdev->bdev,b));
806         return -EINVAL;
807 }
808
809 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
810 {
811         return  sb1->set_uuid0 == sb2->set_uuid0 &&
812                 sb1->set_uuid1 == sb2->set_uuid1 &&
813                 sb1->set_uuid2 == sb2->set_uuid2 &&
814                 sb1->set_uuid3 == sb2->set_uuid3;
815 }
816
817 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
818 {
819         int ret;
820         mdp_super_t *tmp1, *tmp2;
821
822         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
823         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
824
825         if (!tmp1 || !tmp2) {
826                 ret = 0;
827                 printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
828                 goto abort;
829         }
830
831         *tmp1 = *sb1;
832         *tmp2 = *sb2;
833
834         /*
835          * nr_disks is not constant
836          */
837         tmp1->nr_disks = 0;
838         tmp2->nr_disks = 0;
839
840         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
841 abort:
842         kfree(tmp1);
843         kfree(tmp2);
844         return ret;
845 }
846
847 static u32 md_csum_fold(u32 csum)
848 {
849         csum = (csum & 0xffff) + (csum >> 16);
850         return (csum & 0xffff) + (csum >> 16);
851 }
852
853 static unsigned int calc_sb_csum(mdp_super_t *sb)
854 {
855         u64 newcsum = 0;
856         u32 *sb32 = (u32*)sb;
857         int i;
858         unsigned int disk_csum, csum;
859
860         disk_csum = sb->sb_csum;
861         sb->sb_csum = 0;
862
863         for (i = 0; i < MD_SB_BYTES/4 ; i++)
864                 newcsum += sb32[i];
865         csum = (newcsum & 0xffffffff) + (newcsum>>32);
866
867 #ifdef CONFIG_ALPHA
868         /* This used to use csum_partial, which was wrong for several
869          * reasons including that different results are returned on
870          * different architectures.  It isn't critical that we get exactly
871          * the same return value as before (we always csum_fold before
872          * testing, and that removes any differences).  However as we
873          * know that csum_partial always returned a 16bit value on
874          * alphas, do a fold to maximise conformity to previous behaviour.
875          */
876         sb->sb_csum = md_csum_fold(disk_csum);
877 #else
878         sb->sb_csum = disk_csum;
879 #endif
880         return csum;
881 }
882
883 /*
884  * Handle superblock details.
885  * We want to be able to handle multiple superblock formats
886  * so we have a common interface to them all, and an array of
887  * different handlers.
888  * We rely on user-space to write the initial superblock, and support
889  * reading and updating of superblocks.
890  * Interface methods are:
891  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
892  *      loads and validates a superblock on dev.
893  *      if refdev != NULL, compare superblocks on both devices
894  *    Return:
895  *      0 - dev has a superblock that is compatible with refdev
896  *      1 - dev has a superblock that is compatible and newer than refdev
897  *          so dev should be used as the refdev in future
898  *     -EINVAL superblock incompatible or invalid
899  *     -othererror e.g. -EIO
900  *
901  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
902  *      Verify that dev is acceptable into mddev.
903  *       The first time, mddev->raid_disks will be 0, and data from
904  *       dev should be merged in.  Subsequent calls check that dev
905  *       is new enough.  Return 0 or -EINVAL
906  *
907  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
908  *     Update the superblock for rdev with data in mddev
909  *     This does not write to disc.
910  *
911  */
912
913 struct super_type  {
914         char                *name;
915         struct module       *owner;
916         int                 (*load_super)(struct md_rdev *rdev,
917                                           struct md_rdev *refdev,
918                                           int minor_version);
919         int                 (*validate_super)(struct mddev *mddev,
920                                               struct md_rdev *rdev);
921         void                (*sync_super)(struct mddev *mddev,
922                                           struct md_rdev *rdev);
923         unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
924                                                 sector_t num_sectors);
925         int                 (*allow_new_offset)(struct md_rdev *rdev,
926                                                 unsigned long long new_offset);
927 };
928
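/*
 * Sketch of how the load_super() return convention above is consumed when
 * assembling an array (illustrative only, loosely following the assembly
 * path in this file): each device is compared against the freshest one
 * seen so far, and a return of 1 promotes it to be the new reference.
 *
 *   struct md_rdev *rdev, *freshest = NULL;
 *
 *   rdev_for_each(rdev, mddev) {
 *           switch (super_types[mddev->major_version].
 *                   load_super(rdev, freshest, mddev->minor_version)) {
 *           case 1:
 *                   freshest = rdev;
 *                   break;
 *           case 0:
 *                   break;
 *           default:
 *                   ... kick the device out of the array ...
 *           }
 *   }
 */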
929 /*
930  * Check that the given mddev has no bitmap.
931  *
932  * This function is called from the run method of all personalities that do not
933  * support bitmaps. It prints an error message and returns non-zero if mddev
934  * has a bitmap. Otherwise, it returns 0.
935  *
936  */
937 int md_check_no_bitmap(struct mddev *mddev)
938 {
939         if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
940                 return 0;
941         printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
942                 mdname(mddev), mddev->pers->name);
943         return 1;
944 }
945 EXPORT_SYMBOL(md_check_no_bitmap);
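/*
 * Typical call site, as described above (a sketch of a personality's
 * ->run method; 'example_run' is hypothetical):
 *
 *   static int example_run(struct mddev *mddev)
 *   {
 *           if (md_check_no_bitmap(mddev))
 *                   return -EINVAL;
 *           ...
 *   }
 */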
946
947 /*
948  * load_super for 0.90.0
949  */
950 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
951 {
952         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
953         mdp_super_t *sb;
954         int ret;
955
956         /*
957          * Calculate the position of the superblock (512-byte sectors);
958          * it's at the end of the disk.
959          *
960          * It also happens to be a multiple of 4Kb.
961          */
962         rdev->sb_start = calc_dev_sboffset(rdev);
963
964         ret = read_disk_sb(rdev, MD_SB_BYTES);
965         if (ret) return ret;
966
967         ret = -EINVAL;
968
969         bdevname(rdev->bdev, b);
970         sb = page_address(rdev->sb_page);
971
972         if (sb->md_magic != MD_SB_MAGIC) {
973                 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
974                        b);
975                 goto abort;
976         }
977
978         if (sb->major_version != 0 ||
979             sb->minor_version < 90 ||
980             sb->minor_version > 91) {
981                 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
982                         sb->major_version, sb->minor_version,
983                         b);
984                 goto abort;
985         }
986
987         if (sb->raid_disks <= 0)
988                 goto abort;
989
990         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
991                 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
992                         b);
993                 goto abort;
994         }
995
996         rdev->preferred_minor = sb->md_minor;
997         rdev->data_offset = 0;
998         rdev->new_data_offset = 0;
999         rdev->sb_size = MD_SB_BYTES;
1000         rdev->badblocks.shift = -1;
1001
1002         if (sb->level == LEVEL_MULTIPATH)
1003                 rdev->desc_nr = -1;
1004         else
1005                 rdev->desc_nr = sb->this_disk.number;
1006
1007         if (!refdev) {
1008                 ret = 1;
1009         } else {
1010                 __u64 ev1, ev2;
1011                 mdp_super_t *refsb = page_address(refdev->sb_page);
1012                 if (!uuid_equal(refsb, sb)) {
1013                         printk(KERN_WARNING "md: %s has different UUID to %s\n",
1014                                 b, bdevname(refdev->bdev,b2));
1015                         goto abort;
1016                 }
1017                 if (!sb_equal(refsb, sb)) {
1018                         printk(KERN_WARNING "md: %s has same UUID"
1019                                " but different superblock to %s\n",
1020                                b, bdevname(refdev->bdev, b2));
1021                         goto abort;
1022                 }
1023                 ev1 = md_event(sb);
1024                 ev2 = md_event(refsb);
1025                 if (ev1 > ev2)
1026                         ret = 1;
1027                 else
1028                         ret = 0;
1029         }
1030         rdev->sectors = rdev->sb_start;
1031         /* Limit to 4TB as metadata cannot record more than that.
1032          * (not needed for Linear and RAID0 as metadata doesn't
1033          * record this size)
1034          */
1035         if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
1036             sb->level >= 1)
1037                 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1038
1039         if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1040                 /* "this cannot possibly happen" ... */
1041                 ret = -EINVAL;
1042
1043  abort:
1044         return ret;
1045 }
1046
1047 /*
1048  * validate_super for 0.90.0
1049  */
1050 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1051 {
1052         mdp_disk_t *desc;
1053         mdp_super_t *sb = page_address(rdev->sb_page);
1054         __u64 ev1 = md_event(sb);
1055
1056         rdev->raid_disk = -1;
1057         clear_bit(Faulty, &rdev->flags);
1058         clear_bit(In_sync, &rdev->flags);
1059         clear_bit(Bitmap_sync, &rdev->flags);
1060         clear_bit(WriteMostly, &rdev->flags);
1061
1062         if (mddev->raid_disks == 0) {
1063                 mddev->major_version = 0;
1064                 mddev->minor_version = sb->minor_version;
1065                 mddev->patch_version = sb->patch_version;
1066                 mddev->external = 0;
1067                 mddev->chunk_sectors = sb->chunk_size >> 9;
1068                 mddev->ctime = sb->ctime;
1069                 mddev->utime = sb->utime;
1070                 mddev->level = sb->level;
1071                 mddev->clevel[0] = 0;
1072                 mddev->layout = sb->layout;
1073                 mddev->raid_disks = sb->raid_disks;
1074                 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1075                 mddev->events = ev1;
1076                 mddev->bitmap_info.offset = 0;
1077                 mddev->bitmap_info.space = 0;
1078                 /* bitmap can use 60 K after the 4K superblocks */
1079                 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1080                 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1081                 mddev->reshape_backwards = 0;
1082
1083                 if (mddev->minor_version >= 91) {
1084                         mddev->reshape_position = sb->reshape_position;
1085                         mddev->delta_disks = sb->delta_disks;
1086                         mddev->new_level = sb->new_level;
1087                         mddev->new_layout = sb->new_layout;
1088                         mddev->new_chunk_sectors = sb->new_chunk >> 9;
1089                         if (mddev->delta_disks < 0)
1090                                 mddev->reshape_backwards = 1;
1091                 } else {
1092                         mddev->reshape_position = MaxSector;
1093                         mddev->delta_disks = 0;
1094                         mddev->new_level = mddev->level;
1095                         mddev->new_layout = mddev->layout;
1096                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1097                 }
1098
1099                 if (sb->state & (1<<MD_SB_CLEAN))
1100                         mddev->recovery_cp = MaxSector;
1101                 else {
1102                         if (sb->events_hi == sb->cp_events_hi &&
1103                                 sb->events_lo == sb->cp_events_lo) {
1104                                 mddev->recovery_cp = sb->recovery_cp;
1105                         } else
1106                                 mddev->recovery_cp = 0;
1107                 }
1108
1109                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1110                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1111                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1112                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1113
1114                 mddev->max_disks = MD_SB_DISKS;
1115
1116                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1117                     mddev->bitmap_info.file == NULL) {
1118                         mddev->bitmap_info.offset =
1119                                 mddev->bitmap_info.default_offset;
1120                         mddev->bitmap_info.space =
1121                                 mddev->bitmap_info.default_space;
1122                 }
1123
1124         } else if (mddev->pers == NULL) {
1125                 /* Insist on good event counter while assembling, except
1126                  * for spares (which don't need an event count) */
1127                 ++ev1;
1128                 if (sb->disks[rdev->desc_nr].state & (
1129                             (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1130                         if (ev1 < mddev->events)
1131                                 return -EINVAL;
1132         } else if (mddev->bitmap) {
1133                 /* if adding to array with a bitmap, then we can accept an
1134                  * older device ... but not too old.
1135                  */
1136                 if (ev1 < mddev->bitmap->events_cleared)
1137                         return 0;
1138                 if (ev1 < mddev->events)
1139                         set_bit(Bitmap_sync, &rdev->flags);
1140         } else {
1141                 if (ev1 < mddev->events)
1142                         /* just a hot-add of a new device, leave raid_disk at -1 */
1143                         return 0;
1144         }
1145
1146         if (mddev->level != LEVEL_MULTIPATH) {
1147                 desc = sb->disks + rdev->desc_nr;
1148
1149                 if (desc->state & (1<<MD_DISK_FAULTY))
1150                         set_bit(Faulty, &rdev->flags);
1151                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1152                             desc->raid_disk < mddev->raid_disks */) {
1153                         set_bit(In_sync, &rdev->flags);
1154                         rdev->raid_disk = desc->raid_disk;
1155                         rdev->saved_raid_disk = desc->raid_disk;
1156                 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1157                         /* active but not in sync implies recovery up to
1158                          * reshape position.  We don't know exactly where
1159                          * that is, so set to zero for now */
1160                         if (mddev->minor_version >= 91) {
1161                                 rdev->recovery_offset = 0;
1162                                 rdev->raid_disk = desc->raid_disk;
1163                         }
1164                 }
1165                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1166                         set_bit(WriteMostly, &rdev->flags);
1167         } else /* MULTIPATH are always insync */
1168                 set_bit(In_sync, &rdev->flags);
1169         return 0;
1170 }
1171
1172 /*
1173  * sync_super for 0.90.0
1174  */
1175 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1176 {
1177         mdp_super_t *sb;
1178         struct md_rdev *rdev2;
1179         int next_spare = mddev->raid_disks;
1180
1181         /* make rdev->sb match mddev data..
1182          *
1183          * 1/ zero out disks
1184          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1185          * 3/ any empty disks < next_spare become removed
1186          *
1187          * disks[0] gets initialised to REMOVED because
1188          * we cannot be sure from other fields if it has
1189          * been initialised or not.
1190          */
1191         int i;
1192         int active=0, working=0,failed=0,spare=0,nr_disks=0;
1193
1194         rdev->sb_size = MD_SB_BYTES;
1195
1196         sb = page_address(rdev->sb_page);
1197
1198         memset(sb, 0, sizeof(*sb));
1199
1200         sb->md_magic = MD_SB_MAGIC;
1201         sb->major_version = mddev->major_version;
1202         sb->patch_version = mddev->patch_version;
1203         sb->gvalid_words  = 0; /* ignored */
1204         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1205         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1206         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1207         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1208
1209         sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1210         sb->level = mddev->level;
1211         sb->size = mddev->dev_sectors / 2;
1212         sb->raid_disks = mddev->raid_disks;
1213         sb->md_minor = mddev->md_minor;
1214         sb->not_persistent = 0;
1215         sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1216         sb->state = 0;
1217         sb->events_hi = (mddev->events>>32);
1218         sb->events_lo = (u32)mddev->events;
1219
1220         if (mddev->reshape_position == MaxSector)
1221                 sb->minor_version = 90;
1222         else {
1223                 sb->minor_version = 91;
1224                 sb->reshape_position = mddev->reshape_position;
1225                 sb->new_level = mddev->new_level;
1226                 sb->delta_disks = mddev->delta_disks;
1227                 sb->new_layout = mddev->new_layout;
1228                 sb->new_chunk = mddev->new_chunk_sectors << 9;
1229         }
1230         mddev->minor_version = sb->minor_version;
1231         if (mddev->in_sync)
1232         {
1233                 sb->recovery_cp = mddev->recovery_cp;
1234                 sb->cp_events_hi = (mddev->events>>32);
1235                 sb->cp_events_lo = (u32)mddev->events;
1236                 if (mddev->recovery_cp == MaxSector)
1237                         sb->state = (1<< MD_SB_CLEAN);
1238         } else
1239                 sb->recovery_cp = 0;
1240
1241         sb->layout = mddev->layout;
1242         sb->chunk_size = mddev->chunk_sectors << 9;
1243
1244         if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1245                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1246
1247         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1248         rdev_for_each(rdev2, mddev) {
1249                 mdp_disk_t *d;
1250                 int desc_nr;
1251                 int is_active = test_bit(In_sync, &rdev2->flags);
1252
1253                 if (rdev2->raid_disk >= 0 &&
1254                     sb->minor_version >= 91)
1255                         /* we have nowhere to store the recovery_offset,
1256                          * but if it is not below the reshape_position,
1257                          * we can piggy-back on that.
1258                          */
1259                         is_active = 1;
1260                 if (rdev2->raid_disk < 0 ||
1261                     test_bit(Faulty, &rdev2->flags))
1262                         is_active = 0;
1263                 if (is_active)
1264                         desc_nr = rdev2->raid_disk;
1265                 else
1266                         desc_nr = next_spare++;
1267                 rdev2->desc_nr = desc_nr;
1268                 d = &sb->disks[rdev2->desc_nr];
1269                 nr_disks++;
1270                 d->number = rdev2->desc_nr;
1271                 d->major = MAJOR(rdev2->bdev->bd_dev);
1272                 d->minor = MINOR(rdev2->bdev->bd_dev);
1273                 if (is_active)
1274                         d->raid_disk = rdev2->raid_disk;
1275                 else
1276                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1277                 if (test_bit(Faulty, &rdev2->flags))
1278                         d->state = (1<<MD_DISK_FAULTY);
1279                 else if (is_active) {
1280                         d->state = (1<<MD_DISK_ACTIVE);
1281                         if (test_bit(In_sync, &rdev2->flags))
1282                                 d->state |= (1<<MD_DISK_SYNC);
1283                         active++;
1284                         working++;
1285                 } else {
1286                         d->state = 0;
1287                         spare++;
1288                         working++;
1289                 }
1290                 if (test_bit(WriteMostly, &rdev2->flags))
1291                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1292         }
1293         /* now set the "removed" and "faulty" bits on any missing devices */
1294         for (i=0 ; i < mddev->raid_disks ; i++) {
1295                 mdp_disk_t *d = &sb->disks[i];
1296                 if (d->state == 0 && d->number == 0) {
1297                         d->number = i;
1298                         d->raid_disk = i;
1299                         d->state = (1<<MD_DISK_REMOVED);
1300                         d->state |= (1<<MD_DISK_FAULTY);
1301                         failed++;
1302                 }
1303         }
1304         sb->nr_disks = nr_disks;
1305         sb->active_disks = active;
1306         sb->working_disks = working;
1307         sb->failed_disks = failed;
1308         sb->spare_disks = spare;
1309
1310         sb->this_disk = sb->disks[rdev->desc_nr];
1311         sb->sb_csum = calc_sb_csum(sb);
1312 }
1313
1314 /*
1315  * rdev_size_change for 0.90.0
1316  */
1317 static unsigned long long
1318 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1319 {
1320         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1321                 return 0; /* component must fit device */
1322         if (rdev->mddev->bitmap_info.offset)
1323                 return 0; /* can't move bitmap */
1324         rdev->sb_start = calc_dev_sboffset(rdev);
1325         if (!num_sectors || num_sectors > rdev->sb_start)
1326                 num_sectors = rdev->sb_start;
1327         /* Limit to 4TB as metadata cannot record more than that.
1328          * 4TB == 2^32 KB, or 2*2^32 sectors.
1329          */
1330         if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
1331             rdev->mddev->level >= 1)
1332                 num_sectors = (sector_t)(2ULL << 32) - 2;
1333         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1334                        rdev->sb_page);
1335         md_super_wait(rdev->mddev);
1336         return num_sectors;
1337 }
1338
1339 static int
1340 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1341 {
1342         /* non-zero offset changes not possible with v0.90 */
1343         return new_offset == 0;
1344 }
1345
1346 /*
1347  * version 1 superblock
1348  */
1349
1350 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1351 {
1352         __le32 disk_csum;
1353         u32 csum;
1354         unsigned long long newcsum;
1355         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1356         __le32 *isuper = (__le32*)sb;
1357
1358         disk_csum = sb->sb_csum;
1359         sb->sb_csum = 0;
1360         newcsum = 0;
1361         for (; size >= 4; size -= 4)
1362                 newcsum += le32_to_cpu(*isuper++);
1363
1364         if (size == 2)
1365                 newcsum += le16_to_cpu(*(__le16*) isuper);
1366
1367         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1368         sb->sb_csum = disk_csum;
1369         return cpu_to_le32(csum);
1370 }
1371
1372 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1373 {
1374         struct mdp_superblock_1 *sb;
1375         int ret;
1376         sector_t sb_start;
1377         sector_t sectors;
1378         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1379         int bmask;
1380
1381         /*
1382          * Calculate the position of the superblock in 512byte sectors.
1383          * It is always aligned to a 4K boundary and
1384          * depending on minor_version, it can be:
1385          * 0: At least 8K, but less than 12K, from end of device
1386          * 1: At start of device
1387          * 2: 4K from start of device.
1388          */
1389         switch(minor_version) {
1390         case 0:
1391                 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1392                 sb_start -= 8*2;
1393                 sb_start &= ~(sector_t)(4*2-1);
1394                 break;
1395         case 1:
1396                 sb_start = 0;
1397                 break;
1398         case 2:
1399                 sb_start = 8;
1400                 break;
1401         default:
1402                 return -EINVAL;
1403         }
1404         rdev->sb_start = sb_start;
1405
1406         /* superblock is rarely larger than 1K, but it can be larger,
1407          * and it is safe to read 4k, so we do that
1408          */
1409         ret = read_disk_sb(rdev, 4096);
1410         if (ret) return ret;
1411
1412         sb = page_address(rdev->sb_page);
1413
1414         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1415             sb->major_version != cpu_to_le32(1) ||
1416             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1417             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1418             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1419                 return -EINVAL;
1420
1421         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1422                 printk("md: invalid superblock checksum on %s\n",
1423                         bdevname(rdev->bdev,b));
1424                 return -EINVAL;
1425         }
1426         if (le64_to_cpu(sb->data_size) < 10) {
1427                 printk("md: data_size too small on %s\n",
1428                        bdevname(rdev->bdev,b));
1429                 return -EINVAL;
1430         }
1431         if (sb->pad0 ||
1432             sb->pad3[0] ||
1433             memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1434                 /* Some padding is non-zero, might be a new feature */
1435                 return -EINVAL;
1436
1437         rdev->preferred_minor = 0xffff;
1438         rdev->data_offset = le64_to_cpu(sb->data_offset);
1439         rdev->new_data_offset = rdev->data_offset;
1440         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1441             (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1442                 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1443         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1444
1445         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1446         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1447         if (rdev->sb_size & bmask)
1448                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1449
1450         if (minor_version
1451             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1452                 return -EINVAL;
1453         if (minor_version
1454             && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1455                 return -EINVAL;
1456
1457         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1458                 rdev->desc_nr = -1;
1459         else
1460                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1461
1462         if (!rdev->bb_page) {
1463                 rdev->bb_page = alloc_page(GFP_KERNEL);
1464                 if (!rdev->bb_page)
1465                         return -ENOMEM;
1466         }
1467         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1468             rdev->badblocks.count == 0) {
1469                 /* need to load the bad block list.
1470                  * Currently we limit it to one page.
1471                  */
1472                 s32 offset;
1473                 sector_t bb_sector;
1474                 u64 *bbp;
1475                 int i;
1476                 int sectors = le16_to_cpu(sb->bblog_size);
1477                 if (sectors > (PAGE_SIZE / 512))
1478                         return -EINVAL;
1479                 offset = le32_to_cpu(sb->bblog_offset);
1480                 if (offset == 0)
1481                         return -EINVAL;
1482                 bb_sector = (long long)offset;
1483                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1484                                   rdev->bb_page, REQ_OP_READ, 0, true))
1485                         return -EIO;
1486                 bbp = (u64 *)page_address(rdev->bb_page);
1487                 rdev->badblocks.shift = sb->bblog_shift;
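                     /* Each 64-bit entry packs a 54-bit start sector in the
                      * high bits and a 10-bit length in the low bits, both
                      * scaled by bblog_shift; an all-ones entry terminates
                      * the list. */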
1488                 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1489                         u64 bb = le64_to_cpu(*bbp);
1490                         int count = bb & (0x3ff);
1491                         u64 sector = bb >> 10;
1492                         sector <<= sb->bblog_shift;
1493                         count <<= sb->bblog_shift;
1494                         if (bb + 1 == 0)
1495                                 break;
1496                         if (badblocks_set(&rdev->badblocks, sector, count, 1))
1497                                 return -EINVAL;
1498                 }
1499         } else if (sb->bblog_offset != 0)
1500                 rdev->badblocks.shift = 0;
1501
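             /* A return of 1 means this superblock is newer than the
              * reference (or there is no reference yet) and should become
              * the new reference; 0 means the reference stays the freshest. */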
1502         if (!refdev) {
1503                 ret = 1;
1504         } else {
1505                 __u64 ev1, ev2;
1506                 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1507
1508                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1509                     sb->level != refsb->level ||
1510                     sb->layout != refsb->layout ||
1511                     sb->chunksize != refsb->chunksize) {
1512                         printk(KERN_WARNING "md: %s has strangely different"
1513                                 " superblock to %s\n",
1514                                 bdevname(rdev->bdev,b),
1515                                 bdevname(refdev->bdev,b2));
1516                         return -EINVAL;
1517                 }
1518                 ev1 = le64_to_cpu(sb->events);
1519                 ev2 = le64_to_cpu(refsb->events);
1520
1521                 if (ev1 > ev2)
1522                         ret = 1;
1523                 else
1524                         ret = 0;
1525         }
1526         if (minor_version) {
1527                 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1528                 sectors -= rdev->data_offset;
1529         } else
1530                 sectors = rdev->sb_start;
1531         if (sectors < le64_to_cpu(sb->data_size))
1532                 return -EINVAL;
1533         rdev->sectors = le64_to_cpu(sb->data_size);
1534         return ret;
1535 }
1536
1537 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1538 {
1539         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1540         __u64 ev1 = le64_to_cpu(sb->events);
1541
1542         rdev->raid_disk = -1;
1543         clear_bit(Faulty, &rdev->flags);
1544         clear_bit(In_sync, &rdev->flags);
1545         clear_bit(Bitmap_sync, &rdev->flags);
1546         clear_bit(WriteMostly, &rdev->flags);
1547
1548         if (mddev->raid_disks == 0) {
1549                 mddev->major_version = 1;
1550                 mddev->patch_version = 0;
1551                 mddev->external = 0;
1552                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1553                 mddev->ctime = le64_to_cpu(sb->ctime);
1554                 mddev->utime = le64_to_cpu(sb->utime);
1555                 mddev->level = le32_to_cpu(sb->level);
1556                 mddev->clevel[0] = 0;
1557                 mddev->layout = le32_to_cpu(sb->layout);
1558                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1559                 mddev->dev_sectors = le64_to_cpu(sb->size);
1560                 mddev->events = ev1;
1561                 mddev->bitmap_info.offset = 0;
1562                 mddev->bitmap_info.space = 0;
1563                 /* Default location for the bitmap is 1K after the
1564                  * superblock, using 3K - a total of 4K.
1565                  */
1566                 mddev->bitmap_info.default_offset = 1024 >> 9;
1567                 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1568                 mddev->reshape_backwards = 0;
1569
1570                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1571                 memcpy(mddev->uuid, sb->set_uuid, 16);
1572
1573                 mddev->max_disks =  (4096-256)/2;
1574
1575                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1576                     mddev->bitmap_info.file == NULL) {
1577                         mddev->bitmap_info.offset =
1578                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1579                         /* Metadata doesn't record how much space is available.
1580                          * For 1.0, we assume the bitmap can use up to the
1581                          * superblock if placed before it, else up to 4K beyond it.
1582                          * For others, assume no change is possible.
1583                          */
1584                         if (mddev->minor_version > 0)
1585                                 mddev->bitmap_info.space = 0;
1586                         else if (mddev->bitmap_info.offset > 0)
1587                                 mddev->bitmap_info.space =
1588                                         8 - mddev->bitmap_info.offset;
1589                         else
1590                                 mddev->bitmap_info.space =
1591                                         -mddev->bitmap_info.offset;
1592                 }
1593
1594                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1595                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1596                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1597                         mddev->new_level = le32_to_cpu(sb->new_level);
1598                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1599                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1600                         if (mddev->delta_disks < 0 ||
1601                             (mddev->delta_disks == 0 &&
1602                              (le32_to_cpu(sb->feature_map)
1603                               & MD_FEATURE_RESHAPE_BACKWARDS)))
1604                                 mddev->reshape_backwards = 1;
1605                 } else {
1606                         mddev->reshape_position = MaxSector;
1607                         mddev->delta_disks = 0;
1608                         mddev->new_level = mddev->level;
1609                         mddev->new_layout = mddev->layout;
1610                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1611                 }
1612
1613                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1614                         set_bit(MD_HAS_JOURNAL, &mddev->flags);
1615         } else if (mddev->pers == NULL) {
1616                 /* Insist on a good event counter while assembling, except for
1617                  * spares (which don't need an event count) */
1618                 ++ev1;
1619                 if (rdev->desc_nr >= 0 &&
1620                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1621                     (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1622                      le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1623                         if (ev1 < mddev->events)
1624                                 return -EINVAL;
1625         } else if (mddev->bitmap) {
1626                 /* If adding to array with a bitmap, then we can accept an
1627                  * older device, but not too old.
1628                  */
1629                 if (ev1 < mddev->bitmap->events_cleared)
1630                         return 0;
1631                 if (ev1 < mddev->events)
1632                         set_bit(Bitmap_sync, &rdev->flags);
1633         } else {
1634                 if (ev1 < mddev->events)
1635                         /* just a hot-add of a new device, leave raid_disk at -1 */
1636                         return 0;
1637         }
1638         if (mddev->level != LEVEL_MULTIPATH) {
1639                 int role;
1640                 if (rdev->desc_nr < 0 ||
1641                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1642                         role = MD_DISK_ROLE_SPARE;
1643                         rdev->desc_nr = -1;
1644                 } else
1645                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1646                 switch(role) {
1647                 case MD_DISK_ROLE_SPARE: /* spare */
1648                         break;
1649                 case MD_DISK_ROLE_FAULTY: /* faulty */
1650                         set_bit(Faulty, &rdev->flags);
1651                         break;
1652                 case MD_DISK_ROLE_JOURNAL: /* journal device */
1653                         if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1654                                 /* journal device without journal feature */
1655                                 printk(KERN_WARNING
1656                                   "md: journal device provided without journal feature, ignoring the device\n");
1657                                 return -EINVAL;
1658                         }
1659                         set_bit(Journal, &rdev->flags);
1660                         rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1661                         rdev->raid_disk = 0;
1662                         break;
1663                 default:
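                             /* any other value is a normal data slot number */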
1664                         rdev->saved_raid_disk = role;
1665                         if ((le32_to_cpu(sb->feature_map) &
1666                              MD_FEATURE_RECOVERY_OFFSET)) {
1667                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1668                                 if (!(le32_to_cpu(sb->feature_map) &
1669                                       MD_FEATURE_RECOVERY_BITMAP))
1670                                         rdev->saved_raid_disk = -1;
1671                         } else {
1672                                 /*
1673                                  * If the array is FROZEN, then the device can't
1674                                  * be in_sync with rest of array.
1675                                  */
1676                                 if (!test_bit(MD_RECOVERY_FROZEN,
1677                                               &mddev->recovery))
1678                                         set_bit(In_sync, &rdev->flags);
1679                         }
1680                         rdev->raid_disk = role;
1681                         break;
1682                 }
1683                 if (sb->devflags & WriteMostly1)
1684                         set_bit(WriteMostly, &rdev->flags);
1685                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1686                         set_bit(Replacement, &rdev->flags);
1687         } else /* MULTIPATH are always insync */
1688                 set_bit(In_sync, &rdev->flags);
1689
1690         return 0;
1691 }
1692
1693 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1694 {
1695         struct mdp_superblock_1 *sb;
1696         struct md_rdev *rdev2;
1697         int max_dev, i;
1698         /* make rdev->sb match mddev and rdev data. */
1699
1700         sb = page_address(rdev->sb_page);
1701
1702         sb->feature_map = 0;
1703         sb->pad0 = 0;
1704         sb->recovery_offset = cpu_to_le64(0);
1705         memset(sb->pad3, 0, sizeof(sb->pad3));
1706
1707         sb->utime = cpu_to_le64((__u64)mddev->utime);
1708         sb->events = cpu_to_le64(mddev->events);
1709         if (mddev->in_sync)
1710                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1711         else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
1712                 sb->resync_offset = cpu_to_le64(MaxSector);
1713         else
1714                 sb->resync_offset = cpu_to_le64(0);
1715
1716         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1717
1718         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1719         sb->size = cpu_to_le64(mddev->dev_sectors);
1720         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1721         sb->level = cpu_to_le32(mddev->level);
1722         sb->layout = cpu_to_le32(mddev->layout);
1723
1724         if (test_bit(WriteMostly, &rdev->flags))
1725                 sb->devflags |= WriteMostly1;
1726         else
1727                 sb->devflags &= ~WriteMostly1;
1728         sb->data_offset = cpu_to_le64(rdev->data_offset);
1729         sb->data_size = cpu_to_le64(rdev->sectors);
1730
1731         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1732                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1733                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1734         }
1735
1736         if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
1737             !test_bit(In_sync, &rdev->flags)) {
1738                 sb->feature_map |=
1739                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1740                 sb->recovery_offset =
1741                         cpu_to_le64(rdev->recovery_offset);
1742                 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
1743                         sb->feature_map |=
1744                                 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
1745         }
1746         /* Note: recovery_offset and journal_tail share space  */
1747         if (test_bit(Journal, &rdev->flags))
1748                 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
1749         if (test_bit(Replacement, &rdev->flags))
1750                 sb->feature_map |=
1751                         cpu_to_le32(MD_FEATURE_REPLACEMENT);
1752
1753         if (mddev->reshape_position != MaxSector) {
1754                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1755                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1756                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1757                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1758                 sb->new_level = cpu_to_le32(mddev->new_level);
1759                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1760                 if (mddev->delta_disks == 0 &&
1761                     mddev->reshape_backwards)
1762                         sb->feature_map
1763                                 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
1764                 if (rdev->new_data_offset != rdev->data_offset) {
1765                         sb->feature_map
1766                                 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
1767                         sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
1768                                                              - rdev->data_offset));
1769                 }
1770         }
1771
1772         if (mddev_is_clustered(mddev))
1773                 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
1774
1775         if (rdev->badblocks.count == 0)
1776                 /* Nothing to do for bad blocks */ ;
1777         else if (sb->bblog_offset == 0)
1778                 /* Cannot record bad blocks on this device */
1779                 md_error(mddev, rdev);
1780         else {
1781                 struct badblocks *bb = &rdev->badblocks;
1782                 u64 *bbp = (u64 *)page_address(rdev->bb_page);
1783                 u64 *p = bb->page;
1784                 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
1785                 if (bb->changed) {
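                             /* Re-encode the in-memory bad-block list into
                              * the on-disk format under a seqlock read
                              * section; retry if it changed while copying. */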
1786                         unsigned seq;
1787
1788 retry:
1789                         seq = read_seqbegin(&bb->lock);
1790
1791                         memset(bbp, 0xff, PAGE_SIZE);
1792
1793                         for (i = 0 ; i < bb->count ; i++) {
1794                                 u64 internal_bb = p[i];
1795                                 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
1796                                                 | BB_LEN(internal_bb));
1797                                 bbp[i] = cpu_to_le64(store_bb);
1798                         }
1799                         bb->changed = 0;
1800                         if (read_seqretry(&bb->lock, seq))
1801                                 goto retry;
1802
1803                         bb->sector = (rdev->sb_start +
1804                                       (int)le32_to_cpu(sb->bblog_offset));
1805                         bb->size = le16_to_cpu(sb->bblog_size);
1806                 }
1807         }
1808
1809         max_dev = 0;
1810         rdev_for_each(rdev2, mddev)
1811                 if (rdev2->desc_nr+1 > max_dev)
1812                         max_dev = rdev2->desc_nr+1;
1813
1814         if (max_dev > le32_to_cpu(sb->max_dev)) {
1815                 int bmask;
1816                 sb->max_dev = cpu_to_le32(max_dev);
1817                 rdev->sb_size = max_dev * 2 + 256;
1818                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1819                 if (rdev->sb_size & bmask)
1820                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
1821         } else
1822                 max_dev = le32_to_cpu(sb->max_dev);
1823
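             /* Slots with no matching rdev below are left marked faulty. */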
1824         for (i=0; i<max_dev;i++)
1825                 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1826
1827         if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
1828                 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
1829
1830         rdev_for_each(rdev2, mddev) {
1831                 i = rdev2->desc_nr;
1832                 if (test_bit(Faulty, &rdev2->flags))
1833                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1834                 else if (test_bit(In_sync, &rdev2->flags))
1835                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1836                 else if (test_bit(Journal, &rdev2->flags))
1837                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
1838                 else if (rdev2->raid_disk >= 0)
1839                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1840                 else
1841                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
1842         }
1843
1844         sb->sb_csum = calc_sb_1_csum(sb);
1845 }
1846
1847 static unsigned long long
1848 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1849 {
1850         struct mdp_superblock_1 *sb;
1851         sector_t max_sectors;
1852         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1853                 return 0; /* component must fit device */
1854         if (rdev->data_offset != rdev->new_data_offset)
1855                 return 0; /* too confusing */
1856         if (rdev->sb_start < rdev->data_offset) {
1857                 /* minor versions 1 and 2; superblock before data */
1858                 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1859                 max_sectors -= rdev->data_offset;
1860                 if (!num_sectors || num_sectors > max_sectors)
1861                         num_sectors = max_sectors;
1862         } else if (rdev->mddev->bitmap_info.offset) {
1863                 /* minor version 0 with bitmap we can't move */
1864                 return 0;
1865         } else {
1866                 /* minor version 0; superblock after data */
1867                 sector_t sb_start;
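                     /* Recompute where the superblock will sit at the end of
                      * the device; the data area may grow by as much as the
                      * superblock moves. */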
1868                 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1869                 sb_start &= ~(sector_t)(4*2 - 1);
1870                 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1871                 if (!num_sectors || num_sectors > max_sectors)
1872                         num_sectors = max_sectors;
1873                 rdev->sb_start = sb_start;
1874         }
1875         sb = page_address(rdev->sb_page);
1876         sb->data_size = cpu_to_le64(num_sectors);
1877         sb->super_offset = cpu_to_le64(rdev->sb_start);
1878         sb->sb_csum = calc_sb_1_csum(sb);
1879         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1880                        rdev->sb_page);
1881         md_super_wait(rdev->mddev);
1882         return num_sectors;
1883
1884 }
1885
1886 static int
1887 super_1_allow_new_offset(struct md_rdev *rdev,
1888                          unsigned long long new_offset)
1889 {
1890         /* All necessary checks on new >= old have been done */
1891         struct bitmap *bitmap;
1892         if (new_offset >= rdev->data_offset)
1893                 return 1;
1894
1895         /* with 1.0 metadata, there is no metadata to tread on
1896          * so we can always move back */
1897         if (rdev->mddev->minor_version == 0)
1898                 return 1;
1899
1900         /* otherwise we must be sure not to step on
1901          * any metadata, so stay:
1902          * 36K beyond start of superblock
1903          * beyond end of badblocks
1904          * beyond write-intent bitmap
1905          */
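             /* (32+4)*2 sectors == 36K expressed in 512-byte sectors */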
1906         if (rdev->sb_start + (32+4)*2 > new_offset)
1907                 return 0;
1908         bitmap = rdev->mddev->bitmap;
1909         if (bitmap && !rdev->mddev->bitmap_info.file &&
1910             rdev->sb_start + rdev->mddev->bitmap_info.offset +
1911             bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
1912                 return 0;
1913         if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
1914                 return 0;
1915
1916         return 1;
1917 }
1918
1919 static struct super_type super_types[] = {
1920         [0] = {
1921                 .name   = "0.90.0",
1922                 .owner  = THIS_MODULE,
1923                 .load_super         = super_90_load,
1924                 .validate_super     = super_90_validate,
1925                 .sync_super         = super_90_sync,
1926                 .rdev_size_change   = super_90_rdev_size_change,
1927                 .allow_new_offset   = super_90_allow_new_offset,
1928         },
1929         [1] = {
1930                 .name   = "md-1",
1931                 .owner  = THIS_MODULE,
1932                 .load_super         = super_1_load,
1933                 .validate_super     = super_1_validate,
1934                 .sync_super         = super_1_sync,
1935                 .rdev_size_change   = super_1_rdev_size_change,
1936                 .allow_new_offset   = super_1_allow_new_offset,
1937         },
1938 };
1939
1940 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
1941 {
1942         if (mddev->sync_super) {
1943                 mddev->sync_super(mddev, rdev);
1944                 return;
1945         }
1946
1947         BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
1948
1949         super_types[mddev->major_version].sync_super(mddev, rdev);
1950 }
1951
1952 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
1953 {
1954         struct md_rdev *rdev, *rdev2;
1955
1956         rcu_read_lock();
1957         rdev_for_each_rcu(rdev, mddev1) {
1958                 if (test_bit(Faulty, &rdev->flags) ||
1959                     test_bit(Journal, &rdev->flags) ||
1960                     rdev->raid_disk == -1)
1961                         continue;
1962                 rdev_for_each_rcu(rdev2, mddev2) {
1963                         if (test_bit(Faulty, &rdev2->flags) ||
1964                             test_bit(Journal, &rdev2->flags) ||
1965                             rdev2->raid_disk == -1)
1966                                 continue;
1967                         if (rdev->bdev->bd_contains ==
1968                             rdev2->bdev->bd_contains) {
1969                                 rcu_read_unlock();
1970                                 return 1;
1971                         }
1972                 }
1973         }
1974         rcu_read_unlock();
1975         return 0;
1976 }
1977
1978 static LIST_HEAD(pending_raid_disks);
1979
1980 /*
1981  * Try to register data integrity profile for an mddev
1982  *
1983  * This is called when an array is started and after a disk has been kicked
1984  * from the array. It only succeeds if all working and active component devices
1985  * are integrity capable with matching profiles.
1986  */
1987 int md_integrity_register(struct mddev *mddev)
1988 {
1989         struct md_rdev *rdev, *reference = NULL;
1990
1991         if (list_empty(&mddev->disks))
1992                 return 0; /* nothing to do */
1993         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
1994                 return 0; /* shouldn't register, or already is */
1995         rdev_for_each(rdev, mddev) {
1996                 /* skip spares and non-functional disks */
1997                 if (test_bit(Faulty, &rdev->flags))
1998                         continue;
1999                 if (rdev->raid_disk < 0)
2000                         continue;
2001                 if (!reference) {
2002                         /* Use the first rdev as the reference */
2003                         reference = rdev;
2004                         continue;
2005                 }
2006                 /* does this rdev's profile match the reference profile? */
2007                 if (blk_integrity_compare(reference->bdev->bd_disk,
2008                                 rdev->bdev->bd_disk) < 0)
2009                         return -EINVAL;
2010         }
2011         if (!reference || !bdev_get_integrity(reference->bdev))
2012                 return 0;
2013         /*
2014          * All component devices are integrity capable and have matching
2015          * profiles, register the common profile for the md device.
2016          */
2017         blk_integrity_register(mddev->gendisk,
2018                                bdev_get_integrity(reference->bdev));
2019
2020         printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
2021         if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
2022                 printk(KERN_ERR "md: failed to create integrity pool for %s\n",
2023                        mdname(mddev));
2024                 return -EINVAL;
2025         }
2026         return 0;
2027 }
2028 EXPORT_SYMBOL(md_integrity_register);
2029
2030 /*
2031  * Attempt to add an rdev, but only if it is consistent with the current
2032  * integrity profile
2033  */
2034 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2035 {
2036         struct blk_integrity *bi_rdev;
2037         struct blk_integrity *bi_mddev;
2038         char name[BDEVNAME_SIZE];
2039
2040         if (!mddev->gendisk)
2041                 return 0;
2042
2043         bi_rdev = bdev_get_integrity(rdev->bdev);
2044         bi_mddev = blk_get_integrity(mddev->gendisk);
2045
2046         if (!bi_mddev) /* nothing to do */
2047                 return 0;
2048
2049         if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2050                 printk(KERN_NOTICE "%s: incompatible integrity profile for %s\n",
2051                                 mdname(mddev), bdevname(rdev->bdev, name));
2052                 return -ENXIO;
2053         }
2054
2055         return 0;
2056 }
2057 EXPORT_SYMBOL(md_integrity_add_rdev);
2058
2059 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2060 {
2061         char b[BDEVNAME_SIZE];
2062         struct kobject *ko;
2063         int err;
2064
2065         /* prevent duplicates */
2066         if (find_rdev(mddev, rdev->bdev->bd_dev))
2067                 return -EEXIST;
2068
2069         /* make sure rdev->sectors exceeds mddev->dev_sectors */
2070         if (!test_bit(Journal, &rdev->flags) &&
2071             rdev->sectors &&
2072             (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2073                 if (mddev->pers) {
2074                         /* Cannot change size, so fail
2075                          * If mddev->level <= 0, then we don't care
2076                          * about aligning sizes (e.g. linear)
2077                          */
2078                         if (mddev->level > 0)
2079                                 return -ENOSPC;
2080                 } else
2081                         mddev->dev_sectors = rdev->sectors;
2082         }
2083
2084         /* Verify rdev->desc_nr is unique.
2085          * If it is -1, assign a free number, else
2086          * check that the number is not in use.
2087          */
2088         rcu_read_lock();
2089         if (rdev->desc_nr < 0) {
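                     /* Pick the first unused number, starting above the
                      * active slots if the array is already running. */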
2090                 int choice = 0;
2091                 if (mddev->pers)
2092                         choice = mddev->raid_disks;
2093                 while (md_find_rdev_nr_rcu(mddev, choice))
2094                         choice++;
2095                 rdev->desc_nr = choice;
2096         } else {
2097                 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2098                         rcu_read_unlock();
2099                         return -EBUSY;
2100                 }
2101         }
2102         rcu_read_unlock();
2103         if (!test_bit(Journal, &rdev->flags) &&
2104             mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2105                 printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
2106                        mdname(mddev), mddev->max_disks);
2107                 return -EBUSY;
2108         }
2109         bdevname(rdev->bdev,b);
2110         strreplace(b, '/', '!');
2111
2112         rdev->mddev = mddev;
2113         printk(KERN_INFO "md: bind<%s>\n", b);
2114
2115         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2116                 goto fail;
2117
2118         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2119         if (sysfs_create_link(&rdev->kobj, ko, "block"))
2120                 /* failure here is OK */;
2121         rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2122
2123         list_add_rcu(&rdev->same_set, &mddev->disks);
2124         bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2125
2126         /* May as well allow recovery to be retried once */
2127         mddev->recovery_disabled++;
2128
2129         return 0;
2130
2131  fail:
2132         printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
2133                b, mdname(mddev));
2134         return err;
2135 }
2136
2137 static void md_delayed_delete(struct work_struct *ws)
2138 {
2139         struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2140         kobject_del(&rdev->kobj);
2141         kobject_put(&rdev->kobj);
2142 }
2143
2144 static void unbind_rdev_from_array(struct md_rdev *rdev)
2145 {
2146         char b[BDEVNAME_SIZE];
2147
2148         bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2149         list_del_rcu(&rdev->same_set);
2150         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
2151         rdev->mddev = NULL;
2152         sysfs_remove_link(&rdev->kobj, "block");
2153         sysfs_put(rdev->sysfs_state);
2154         rdev->sysfs_state = NULL;
2155         rdev->badblocks.count = 0;
2156         /* We need to delay this, otherwise we can deadlock when
2157          * writing 'remove' to "dev/state".  We also need
2158          * to delay it due to rcu usage.
2159          */
2160         synchronize_rcu();
2161         INIT_WORK(&rdev->del_work, md_delayed_delete);
2162         kobject_get(&rdev->kobj);
2163         queue_work(md_misc_wq, &rdev->del_work);
2164 }
2165
2166 /*
2167  * prevent the device from being mounted, repartitioned or
2168  * otherwise reused by a RAID array (or any other kernel
2169  * subsystem), by bd_claiming the device.
2170  */
2171 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2172 {
2173         int err = 0;
2174         struct block_device *bdev;
2175         char b[BDEVNAME_SIZE];
2176
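             /* For a shared claim, pass the address of lock_rdev itself as
              * the holder cookie so that all md shared claimers present the
              * same holder to the exclusive-open code. */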
2177         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2178                                  shared ? (struct md_rdev *)lock_rdev : rdev);
2179         if (IS_ERR(bdev)) {
2180                 printk(KERN_ERR "md: could not open %s.\n",
2181                         __bdevname(dev, b));
2182                 return PTR_ERR(bdev);
2183         }
2184         rdev->bdev = bdev;
2185         return err;
2186 }
2187
2188 static void unlock_rdev(struct md_rdev *rdev)
2189 {
2190         struct block_device *bdev = rdev->bdev;
2191         rdev->bdev = NULL;
2192         blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2193 }
2194
2195 void md_autodetect_dev(dev_t dev);
2196
2197 static void export_rdev(struct md_rdev *rdev)
2198 {
2199         char b[BDEVNAME_SIZE];
2200
2201         printk(KERN_INFO "md: export_rdev(%s)\n",
2202                 bdevname(rdev->bdev,b));
2203         md_rdev_clear(rdev);
2204 #ifndef MODULE
2205         if (test_bit(AutoDetected, &rdev->flags))
2206                 md_autodetect_dev(rdev->bdev->bd_dev);
2207 #endif
2208         unlock_rdev(rdev);
2209         kobject_put(&rdev->kobj);
2210 }
2211
2212 void md_kick_rdev_from_array(struct md_rdev *rdev)
2213 {
2214         unbind_rdev_from_array(rdev);
2215         export_rdev(rdev);
2216 }
2217 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
2218
2219 static void export_array(struct mddev *mddev)
2220 {
2221         struct md_rdev *rdev;
2222
2223         while (!list_empty(&mddev->disks)) {
2224                 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2225                                         same_set);
2226                 md_kick_rdev_from_array(rdev);
2227         }
2228         mddev->raid_disks = 0;
2229         mddev->major_version = 0;
2230 }
2231
2232 static void sync_sbs(struct mddev *mddev, int nospares)
2233 {
2234         /* Update each superblock (in-memory image), but
2235          * if we are allowed to, skip spares which already
2236          * have the right event counter, or have one earlier
2237          * (which would mean they aren't being marked as dirty
2238          * with the rest of the array)
2239          */
2240         struct md_rdev *rdev;
2241         rdev_for_each(rdev, mddev) {
2242                 if (rdev->sb_events == mddev->events ||
2243                     (nospares &&
2244                      rdev->raid_disk < 0 &&
2245                      rdev->sb_events+1 == mddev->events)) {
2246                         /* Don't update this superblock */
2247                         rdev->sb_loaded = 2;
2248                 } else {
2249                         sync_super(mddev, rdev);
2250                         rdev->sb_loaded = 1;
2251                 }
2252         }
2253 }
2254
2255 static bool does_sb_need_changing(struct mddev *mddev)
2256 {
2257         struct md_rdev *rdev = NULL, *iter;
2258         struct mdp_superblock_1 *sb;
2259         int role;
2260
2261         /* Find a good rdev */
2262         rdev_for_each(iter, mddev)
2263                 if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
2264                         rdev = iter;
2265                         break;
2266                 }
2267
2268         /* No good device found. */
2269         if (!rdev)
2270                 return false;
2271
2272         sb = page_address(rdev->sb_page);
2273         /* Check if a device has become faulty or a spare become active */
2274         rdev_for_each(rdev, mddev) {
2275                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
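                     /* 0xffff is a spare slot, 0xfffe faulty, 0xfffd journal;
                      * anything below 0xfffd is an active data slot. */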
2276                 /* Device activated? */
2277                 if (role == 0xffff && rdev->raid_disk >=0 &&
2278                     !test_bit(Faulty, &rdev->flags))
2279                         return true;
2280                 /* Device turned faulty? */
2281                 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2282                         return true;
2283         }
2284
2285         /* Check if any mddev parameters have changed */
2286         if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2287             (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2288             (mddev->layout != le32_to_cpu(sb->layout)) ||
2289             (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2290             (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2291                 return true;
2292
2293         return false;
2294 }
2295
2296 void md_update_sb(struct mddev *mddev, int force_change)
2297 {
2298         struct md_rdev *rdev;
2299         int sync_req;
2300         int nospares = 0;
2301         int any_badblocks_changed = 0;
2302         int ret = -1;
2303
2304         if (mddev->ro) {
2305                 if (force_change)
2306                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2307                 return;
2308         }
2309
2310 repeat:
2311         if (mddev_is_clustered(mddev)) {
2312                 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2313                         force_change = 1;
2314                 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2315                         nospares = 1;
2316                 ret = md_cluster_ops->metadata_update_start(mddev);
2317                 /* Has someone else updated the sb? */
2318                 if (!does_sb_need_changing(mddev)) {
2319                         if (ret == 0)
2320                                 md_cluster_ops->metadata_update_cancel(mddev);
2321                         bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
2322                                                          BIT(MD_CHANGE_DEVS) |
2323                                                          BIT(MD_CHANGE_CLEAN));
2324                         return;
2325                 }
2326         }
2327
2328         /* First make sure individual recovery_offsets are correct */
2329         rdev_for_each(rdev, mddev) {
2330                 if (rdev->raid_disk >= 0 &&
2331                     mddev->delta_disks >= 0 &&
2332                     !test_bit(Journal, &rdev->flags) &&
2333                     !test_bit(In_sync, &rdev->flags) &&
2334                     mddev->curr_resync_completed > rdev->recovery_offset)
2335                                 rdev->recovery_offset = mddev->curr_resync_completed;
2336
2337         }
2338         if (!mddev->persistent) {
2339                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2340                 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2341                 if (!mddev->external) {
2342                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2343                         rdev_for_each(rdev, mddev) {
2344                                 if (rdev->badblocks.changed) {
2345                                         rdev->badblocks.changed = 0;
2346                                         ack_all_badblocks(&rdev->badblocks);
2347                                         md_error(mddev, rdev);
2348                                 }
2349                                 clear_bit(Blocked, &rdev->flags);
2350                                 clear_bit(BlockedBadBlocks, &rdev->flags);
2351                                 wake_up(&rdev->blocked_wait);
2352                         }
2353                 }
2354                 wake_up(&mddev->sb_wait);
2355                 return;
2356         }
2357
2358         spin_lock(&mddev->lock);
2359
2360         mddev->utime = ktime_get_real_seconds();
2361
2362         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2363                 force_change = 1;
2364         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2365                 /* just a clean <-> dirty transition, possibly leave spares alone,
2366                  * though if 'events' isn't the right even/odd, we will have to do
2367                  * the spares after all
2368                  */
2369                 nospares = 1;
2370         if (force_change)
2371                 nospares = 0;
2372         if (mddev->degraded)
2373                 /* If the array is degraded, then skipping spares is both
2374                  * dangerous and fairly pointless.
2375                  * Dangerous because a device that was removed from the array
2376                  * might have an event_count that still looks up-to-date,
2377                  * so it can be re-added without a resync.
2378                  * Pointless because if there are any spares to skip,
2379                  * then a recovery will happen, and soon the array won't
2380                  * be degraded any more and the spare can go back to sleep.
2381                  */
2382                 nospares = 0;
2383
2384         sync_req = mddev->in_sync;
2385
2386         /* If this is just a dirty<->clean transition, and the array is clean
2387          * and 'events' is odd, we can roll back to the previous clean state */
2388         if (nospares
2389             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2390             && mddev->can_decrease_events
2391             && mddev->events != 1) {
2392                 mddev->events--;
2393                 mddev->can_decrease_events = 0;
2394         } else {
2395                 /* otherwise we have to go forward and ... */
2396                 mddev->events ++;
2397                 mddev->can_decrease_events = nospares;
2398         }
2399
2400         /*
2401          * This 64-bit counter should never wrap.
2402          * Either we are in around ~1 trillion A.C., assuming
2403          * 1 reboot per second, or we have a bug...
2404          */
2405         WARN_ON(mddev->events == 0);
2406
2407         rdev_for_each(rdev, mddev) {
2408                 if (rdev->badblocks.changed)
2409                         any_badblocks_changed++;
2410                 if (test_bit(Faulty, &rdev->flags))
2411                         set_bit(FaultRecorded, &rdev->flags);
2412         }
2413
2414         sync_sbs(mddev, nospares);
2415         spin_unlock(&mddev->lock);
2416
2417         pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2418                  mdname(mddev), mddev->in_sync);
2419
2420         bitmap_update_sb(mddev->bitmap);
2421         rdev_for_each(rdev, mddev) {
2422                 char b[BDEVNAME_SIZE];
2423
2424                 if (rdev->sb_loaded != 1)
2425                         continue; /* no noise on spare devices */
2426
2427                 if (!test_bit(Faulty, &rdev->flags)) {
2428                         md_super_write(mddev,rdev,
2429                                        rdev->sb_start, rdev->sb_size,
2430                                        rdev->sb_page);
2431                         pr_debug("md: (write) %s's sb offset: %llu\n",
2432                                  bdevname(rdev->bdev, b),
2433                                  (unsigned long long)rdev->sb_start);
2434                         rdev->sb_events = mddev->events;
2435                         if (rdev->badblocks.size) {
2436                                 md_super_write(mddev, rdev,
2437                                                rdev->badblocks.sector,
2438                                                rdev->badblocks.size << 9,
2439                                                rdev->bb_page);
2440                                 rdev->badblocks.size = 0;
2441                         }
2442
2443                 } else
2444                         pr_debug("md: %s (skipping faulty)\n",
2445                                  bdevname(rdev->bdev, b));
2446
2447                 if (mddev->level == LEVEL_MULTIPATH)
2448                         /* only need to write one superblock... */
2449                         break;
2450         }
2451         md_super_wait(mddev);
2452         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2453
2454         if (mddev_is_clustered(mddev) && ret == 0)
2455                 md_cluster_ops->metadata_update_finish(mddev);
2456
2457         if (mddev->in_sync != sync_req ||
2458             !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
2459                                BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN)))
2460                 /* have to write it out again */
2461                 goto repeat;
2462         wake_up(&mddev->sb_wait);
2463         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2464                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2465
2466         rdev_for_each(rdev, mddev) {
2467                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2468                         clear_bit(Blocked, &rdev->flags);
2469
2470                 if (any_badblocks_changed)
2471                         ack_all_badblocks(&rdev->badblocks);
2472                 clear_bit(BlockedBadBlocks, &rdev->flags);
2473                 wake_up(&rdev->blocked_wait);
2474         }
2475 }
2476 EXPORT_SYMBOL(md_update_sb);
2477
2478 static int add_bound_rdev(struct md_rdev *rdev)
2479 {
2480         struct mddev *mddev = rdev->mddev;
2481         int err = 0;
2482         bool add_journal = test_bit(Journal, &rdev->flags);
2483
2484         if (!mddev->pers->hot_remove_disk || add_journal) {
2485                 /* If there is hot_add_disk but no hot_remove_disk,
2486                  * then added disks are for geometry changes
2487                  * and should be added immediately.
2488                  */
2489                 super_types[mddev->major_version].
2490                         validate_super(mddev, rdev);
2491                 if (add_journal)
2492                         mddev_suspend(mddev);
2493                 err = mddev->pers->hot_add_disk(mddev, rdev);
2494                 if (add_journal)
2495                         mddev_resume(mddev);
2496                 if (err) {
2497                         md_kick_rdev_from_array(rdev);
2498                         return err;
2499                 }
2500         }
2501         sysfs_notify_dirent_safe(rdev->sysfs_state);
2502
2503         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2504         if (mddev->degraded)
2505                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2506         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2507         md_new_event(mddev);
2508         md_wakeup_thread(mddev->thread);
2509         return 0;
2510 }
2511
2512 /* words written to sysfs files may, or may not, be \n terminated.
2513  * We want to accept either case. For this we use cmd_match.
2514  */
2515 static int cmd_match(const char *cmd, const char *str)
2516 {
2517         /* See if cmd, written into a sysfs file, matches
2518          * str.  They must either be the same, or cmd can
2519          * have a trailing newline
2520          */
2521         while (*cmd && *str && *cmd == *str) {
2522                 cmd++;
2523                 str++;
2524         }
2525         if (*cmd == '\n')
2526                 cmd++;
2527         if (*str || *cmd)
2528                 return 0;
2529         return 1;
2530 }
2531
2532 struct rdev_sysfs_entry {
2533         struct attribute attr;
2534         ssize_t (*show)(struct md_rdev *, char *);
2535         ssize_t (*store)(struct md_rdev *, const char *, size_t);
2536 };
2537
2538 static ssize_t
2539 state_show(struct md_rdev *rdev, char *page)
2540 {
2541         char *sep = "";
2542         size_t len = 0;
2543         unsigned long flags = ACCESS_ONCE(rdev->flags);
2544
2545         if (test_bit(Faulty, &flags) ||
2546             rdev->badblocks.unacked_exist) {
2547                 len+= sprintf(page+len, "%sfaulty",sep);
2548                 sep = ",";
2549         }
2550         if (test_bit(In_sync, &flags)) {
2551                 len += sprintf(page+len, "%sin_sync",sep);
2552                 sep = ",";
2553         }
2554         if (test_bit(Journal, &flags)) {
2555                 len += sprintf(page+len, "%sjournal",sep);
2556                 sep = ",";
2557         }
2558         if (test_bit(WriteMostly, &flags)) {
2559                 len += sprintf(page+len, "%swrite_mostly",sep);
2560                 sep = ",";
2561         }
2562         if (test_bit(Blocked, &flags) ||
2563             (rdev->badblocks.unacked_exist
2564              && !test_bit(Faulty, &flags))) {
2565                 len += sprintf(page+len, "%sblocked", sep);
2566                 sep = ",";
2567         }
2568         if (!test_bit(Faulty, &flags) &&
2569             !test_bit(Journal, &flags) &&
2570             !test_bit(In_sync, &flags)) {
2571                 len += sprintf(page+len, "%sspare", sep);
2572                 sep = ",";
2573         }
2574         if (test_bit(WriteErrorSeen, &flags)) {
2575                 len += sprintf(page+len, "%swrite_error", sep);
2576                 sep = ",";
2577         }
2578         if (test_bit(WantReplacement, &flags)) {
2579                 len += sprintf(page+len, "%swant_replacement", sep);
2580                 sep = ",";
2581         }
2582         if (test_bit(Replacement, &flags)) {
2583                 len += sprintf(page+len, "%sreplacement", sep);
2584                 sep = ",";
2585         }
2586
2587         return len+sprintf(page+len, "\n");
2588 }
2589
2590 static ssize_t
2591 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2592 {
2593         /* can write
2594          *  faulty  - simulates an error
2595          *  remove  - disconnects the device
2596          *  writemostly - sets write_mostly
2597          *  -writemostly - clears write_mostly
2598          *  blocked - sets the Blocked flag
2599          *  -blocked - clears the Blocked flag and possibly simulates an error
2600          *  insync - sets In_sync provided the device isn't active
2601          *  -insync - clears In_sync for a device with a slot assigned,
2602          *            so that it gets rebuilt based on bitmap
2603          *  write_error - sets WriteErrorSeen
2604          *  -write_error - clears WriteErrorSeen
2605          */
2606         int err = -EINVAL;
2607         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2608                 md_error(rdev->mddev, rdev);
2609                 if (test_bit(Faulty, &rdev->flags))
2610                         err = 0;
2611                 else
2612                         err = -EBUSY;
2613         } else if (cmd_match(buf, "remove")) {
2614                 if (rdev->mddev->pers) {
2615                         clear_bit(Blocked, &rdev->flags);
2616                         remove_and_add_spares(rdev->mddev, rdev);
2617                 }
2618                 if (rdev->raid_disk >= 0)
2619                         err = -EBUSY;
2620                 else {
2621                         struct mddev *mddev = rdev->mddev;
2622                         err = 0;
2623                         if (mddev_is_clustered(mddev))
2624                                 err = md_cluster_ops->remove_disk(mddev, rdev);
2625
2626                         if (err == 0) {
2627                                 md_kick_rdev_from_array(rdev);
2628                                 if (mddev->pers)
2629                                         md_update_sb(mddev, 1);
2630                                 md_new_event(mddev);
2631                         }
2632                 }
2633         } else if (cmd_match(buf, "writemostly")) {
2634                 set_bit(WriteMostly, &rdev->flags);
2635                 err = 0;
2636         } else if (cmd_match(buf, "-writemostly")) {
2637                 clear_bit(WriteMostly, &rdev->flags);
2638                 err = 0;
2639         } else if (cmd_match(buf, "blocked")) {
2640                 set_bit(Blocked, &rdev->flags);
2641                 err = 0;
2642         } else if (cmd_match(buf, "-blocked")) {
2643                 if (!test_bit(Faulty, &rdev->flags) &&
2644                     rdev->badblocks.unacked_exist) {
2645                         /* metadata handler doesn't understand badblocks,
2646                          * so we need to fail the device
2647                          */
2648                         md_error(rdev->mddev, rdev);
2649                 }
2650                 clear_bit(Blocked, &rdev->flags);
2651                 clear_bit(BlockedBadBlocks, &rdev->flags);
2652                 wake_up(&rdev->blocked_wait);
2653                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2654                 md_wakeup_thread(rdev->mddev->thread);
2655
2656                 err = 0;
2657         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2658                 set_bit(In_sync, &rdev->flags);
2659                 err = 0;
2660         } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
2661                    !test_bit(Journal, &rdev->flags)) {
2662                 if (rdev->mddev->pers == NULL) {
2663                         clear_bit(In_sync, &rdev->flags);
2664                         rdev->saved_raid_disk = rdev->raid_disk;
2665                         rdev->raid_disk = -1;
2666                         err = 0;
2667                 }
2668         } else if (cmd_match(buf, "write_error")) {
2669                 set_bit(WriteErrorSeen, &rdev->flags);
2670                 err = 0;
2671         } else if (cmd_match(buf, "-write_error")) {
2672                 clear_bit(WriteErrorSeen, &rdev->flags);
2673                 err = 0;
2674         } else if (cmd_match(buf, "want_replacement")) {
2675                 /* Any non-spare device that is not a replacement can
2676                  * become want_replacement at any time, but we then need to
2677                  * check if recovery is needed.
2678                  */
2679                 if (rdev->raid_disk >= 0 &&
2680                     !test_bit(Journal, &rdev->flags) &&
2681                     !test_bit(Replacement, &rdev->flags))
2682                         set_bit(WantReplacement, &rdev->flags);
2683                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2684                 md_wakeup_thread(rdev->mddev->thread);
2685                 err = 0;
2686         } else if (cmd_match(buf, "-want_replacement")) {
2687                 /* Clearing 'want_replacement' is always allowed.
2688                  * Once replacement starts it is too late though.
2689                  */
2690                 err = 0;
2691                 clear_bit(WantReplacement, &rdev->flags);
2692         } else if (cmd_match(buf, "replacement")) {
2693                 /* Can only set a device as a replacement when array has not
2694                  * yet been started.  Once running, replacement is automatic
2695                  * from spares, or by assigning 'slot'.
2696                  */
2697                 if (rdev->mddev->pers)
2698                         err = -EBUSY;
2699                 else {
2700                         set_bit(Replacement, &rdev->flags);
2701                         err = 0;
2702                 }
2703         } else if (cmd_match(buf, "-replacement")) {
2704                 /* Similarly, can only clear Replacement before start */
2705                 if (rdev->mddev->pers)
2706                         err = -EBUSY;
2707                 else {
2708                         clear_bit(Replacement, &rdev->flags);
2709                         err = 0;
2710                 }
2711         } else if (cmd_match(buf, "re-add")) {
2712                 if (!rdev->mddev->pers)
2713                         err = -EINVAL;
2714                 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
2715                                 rdev->saved_raid_disk >= 0) {
2716                         /* clear_bit is performed _after_ all the devices
2717                          * have their local Faulty bit cleared. If any writes
2718                          * happen in the meantime in the local node, they
2719                          * will land in the local bitmap, which will be synced
2720                          * by this node eventually
2721                          */
2722                         if (!mddev_is_clustered(rdev->mddev) ||
2723                             (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
2724                                 clear_bit(Faulty, &rdev->flags);
2725                                 err = add_bound_rdev(rdev);
2726                         }
2727                 } else
2728                         err = -EBUSY;
2729         }
2730         if (!err)
2731                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2732         return err ? err : len;
2733 }
2734 static struct rdev_sysfs_entry rdev_state =
2735 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
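/*
 * Illustrative usage of the 'state' attribute above (array and device
 * names are examples; adjust to the local system).  The per-device file
 * is driven from userspace through sysfs:
 *
 *   # cat /sys/block/md0/md/dev-sda1/state
 *   in_sync
 *   # echo want_replacement > /sys/block/md0/md/dev-sda1/state
 *   # echo -write_error     > /sys/block/md0/md/dev-sda1/state
 *
 * Each write is parsed by state_store() with cmd_match(); a string that
 * matches no command is rejected and the write fails.
 */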
2736
2737 static ssize_t
2738 errors_show(struct md_rdev *rdev, char *page)
2739 {
2740         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2741 }
2742
2743 static ssize_t
2744 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
2745 {
2746         unsigned int n;
2747         int rv;
2748
2749         rv = kstrtouint(buf, 10, &n);
2750         if (rv < 0)
2751                 return rv;
2752         atomic_set(&rdev->corrected_errors, n);
2753         return len;
2754 }
2755 static struct rdev_sysfs_entry rdev_errors =
2756 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2757
2758 static ssize_t
2759 slot_show(struct md_rdev *rdev, char *page)
2760 {
2761         if (test_bit(Journal, &rdev->flags))
2762                 return sprintf(page, "journal\n");
2763         else if (rdev->raid_disk < 0)
2764                 return sprintf(page, "none\n");
2765         else
2766                 return sprintf(page, "%d\n", rdev->raid_disk);
2767 }
2768
2769 static ssize_t
2770 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2771 {
2772         int slot;
2773         int err;
2774
2775         if (test_bit(Journal, &rdev->flags))
2776                 return -EBUSY;
2777         if (strncmp(buf, "none", 4)==0)
2778                 slot = -1;
2779         else {
2780                 err = kstrtouint(buf, 10, (unsigned int *)&slot);
2781                 if (err < 0)
2782                         return err;
2783         }
2784         if (rdev->mddev->pers && slot == -1) {
2785                 /* Setting 'slot' on an active array also requires
2786                  * updating the 'rd%d' link, and communicating
2787                  * with the personality with ->hot_*_disk.
2788                  * For now we only support removing
2789                  * failed/spare devices.  This normally happens automatically,
2790                  * but not when the metadata is externally managed.
2791                  */
2792                 if (rdev->raid_disk == -1)
2793                         return -EEXIST;
2794                 /* personality does all needed checks */
2795                 if (rdev->mddev->pers->hot_remove_disk == NULL)
2796                         return -EINVAL;
2797                 clear_bit(Blocked, &rdev->flags);
2798                 remove_and_add_spares(rdev->mddev, rdev);
2799                 if (rdev->raid_disk >= 0)
2800                         return -EBUSY;
2801                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2802                 md_wakeup_thread(rdev->mddev->thread);
2803         } else if (rdev->mddev->pers) {
2804                 /* Activating a spare, or possibly reactivating
2805                  * if we ever get bitmaps working here.
2806                  */
2807                 int err;
2808
2809                 if (rdev->raid_disk != -1)
2810                         return -EBUSY;
2811
2812                 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2813                         return -EBUSY;
2814
2815                 if (rdev->mddev->pers->hot_add_disk == NULL)
2816                         return -EINVAL;
2817
2818                 if (slot >= rdev->mddev->raid_disks &&
2819                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2820                         return -ENOSPC;
2821
2822                 rdev->raid_disk = slot;
2823                 if (test_bit(In_sync, &rdev->flags))
2824                         rdev->saved_raid_disk = slot;
2825                 else
2826                         rdev->saved_raid_disk = -1;
2827                 clear_bit(In_sync, &rdev->flags);
2828                 clear_bit(Bitmap_sync, &rdev->flags);
2829                 err = rdev->mddev->pers->
2830                         hot_add_disk(rdev->mddev, rdev);
2831                 if (err) {
2832                         rdev->raid_disk = -1;
2833                         return err;
2834                 } else
2835                         sysfs_notify_dirent_safe(rdev->sysfs_state);
2836                 if (sysfs_link_rdev(rdev->mddev, rdev))
2837                         /* failure here is OK */;
2838                 /* don't wakeup anyone, leave that to userspace. */
2839         } else {
2840                 if (slot >= rdev->mddev->raid_disks &&
2841                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2842                         return -ENOSPC;
2843                 rdev->raid_disk = slot;
2844                 /* assume it is working */
2845                 clear_bit(Faulty, &rdev->flags);
2846                 clear_bit(WriteMostly, &rdev->flags);
2847                 set_bit(In_sync, &rdev->flags);
2848                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2849         }
2850         return len;
2851 }
2852
2853 static struct rdev_sysfs_entry rdev_slot =
2854 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
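/*
 * Illustrative usage of 'slot' (device names are examples).  It selects
 * which role a member device fills: on an inactive array a slot can be
 * assigned directly; on an active array only removal ("none") or
 * hot-adding into an empty slot is handled here:
 *
 *   # echo 3    > /sys/block/md0/md/dev-sdb1/slot    (use raid disk 3)
 *   # echo none > /sys/block/md0/md/dev-sdb1/slot    (detach from its slot)
 *
 * slot_show() reports "journal", "none" or the raid_disk number.
 */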
2855
2856 static ssize_t
2857 offset_show(struct md_rdev *rdev, char *page)
2858 {
2859         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2860 }
2861
2862 static ssize_t
2863 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
2864 {
2865         unsigned long long offset;
2866         if (kstrtoull(buf, 10, &offset) < 0)
2867                 return -EINVAL;
2868         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2869                 return -EBUSY;
2870         if (rdev->sectors && rdev->mddev->external)
2871                 /* Must set offset before size, so overlap checks
2872                  * can be sane */
2873                 return -EBUSY;
2874         rdev->data_offset = offset;
2875         rdev->new_data_offset = offset;
2876         return len;
2877 }
2878
2879 static struct rdev_sysfs_entry rdev_offset =
2880 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2881
2882 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
2883 {
2884         return sprintf(page, "%llu\n",
2885                        (unsigned long long)rdev->new_data_offset);
2886 }
2887
2888 static ssize_t new_offset_store(struct md_rdev *rdev,
2889                                 const char *buf, size_t len)
2890 {
2891         unsigned long long new_offset;
2892         struct mddev *mddev = rdev->mddev;
2893
2894         if (kstrtoull(buf, 10, &new_offset) < 0)
2895                 return -EINVAL;
2896
2897         if (mddev->sync_thread ||
2898             test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
2899                 return -EBUSY;
2900         if (new_offset == rdev->data_offset)
2901                 /* reset is always permitted */
2902                 ;
2903         else if (new_offset > rdev->data_offset) {
2904                 /* must not push array size beyond rdev_sectors */
2905                 if (new_offset - rdev->data_offset
2906                     + mddev->dev_sectors > rdev->sectors)
2907                                 return -E2BIG;
2908         }
2909         /* Metadata worries about other space details. */
2910
2911         /* decreasing the offset is inconsistent with a backwards
2912          * reshape.
2913          */
2914         if (new_offset < rdev->data_offset &&
2915             mddev->reshape_backwards)
2916                 return -EINVAL;
2917         /* Increasing offset is inconsistent with forwards
2918          * reshape.  reshape_direction should be set to
2919          * 'backwards' first.
2920          */
2921         if (new_offset > rdev->data_offset &&
2922             !mddev->reshape_backwards)
2923                 return -EINVAL;
2924
2925         if (mddev->pers && mddev->persistent &&
2926             !super_types[mddev->major_version]
2927             .allow_new_offset(rdev, new_offset))
2928                 return -E2BIG;
2929         rdev->new_data_offset = new_offset;
2930         if (new_offset > rdev->data_offset)
2931                 mddev->reshape_backwards = 1;
2932         else if (new_offset < rdev->data_offset)
2933                 mddev->reshape_backwards = 0;
2934
2935         return len;
2936 }
2937 static struct rdev_sysfs_entry rdev_new_offset =
2938 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
2939
2940 static ssize_t
2941 rdev_size_show(struct md_rdev *rdev, char *page)
2942 {
2943         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2944 }
2945
2946 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2947 {
2948         /* check if two start/length pairs overlap */
2949         if (s1+l1 <= s2)
2950                 return 0;
2951         if (s2+l2 <= s1)
2952                 return 0;
2953         return 1;
2954 }
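/*
 * overlaps() treats each pair as a half-open range [start, start+len).
 * A couple of worked cases, purely for illustration:
 *
 *   overlaps(0, 100, 100, 50) == 0   (ranges only touch, no overlap)
 *   overlaps(0, 100,  99, 50) == 1   (sector 99 lies in both ranges)
 */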
2955
2956 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2957 {
2958         unsigned long long blocks;
2959         sector_t new;
2960
2961         if (kstrtoull(buf, 10, &blocks) < 0)
2962                 return -EINVAL;
2963
2964         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2965                 return -EINVAL; /* sector conversion overflow */
2966
2967         new = blocks * 2;
2968         if (new != blocks * 2)
2969                 return -EINVAL; /* unsigned long long to sector_t overflow */
2970
2971         *sectors = new;
2972         return 0;
2973 }
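/*
 * strict_blocks_to_sectors() converts a decimal count of 1KiB blocks
 * into 512-byte sectors (i.e. multiplies by two), rejecting input that
 * would overflow either the unsigned long long or sector_t.  For
 * example (illustrative), the string "1048576" (1GiB worth of 1KiB
 * blocks) yields 2097152 sectors.
 */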
2974
2975 static ssize_t
2976 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2977 {
2978         struct mddev *my_mddev = rdev->mddev;
2979         sector_t oldsectors = rdev->sectors;
2980         sector_t sectors;
2981
2982         if (test_bit(Journal, &rdev->flags))
2983                 return -EBUSY;
2984         if (strict_blocks_to_sectors(buf, &sectors) < 0)
2985                 return -EINVAL;
2986         if (rdev->data_offset != rdev->new_data_offset)
2987                 return -EINVAL; /* too confusing */
2988         if (my_mddev->pers && rdev->raid_disk >= 0) {
2989                 if (my_mddev->persistent) {
2990                         sectors = super_types[my_mddev->major_version].
2991                                 rdev_size_change(rdev, sectors);
2992                         if (!sectors)
2993                                 return -EBUSY;
2994                 } else if (!sectors)
2995                         sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
2996                                 rdev->data_offset;
2997                 if (!my_mddev->pers->resize)
2998                         /* Cannot change size for RAID0 or Linear etc */
2999                         return -EINVAL;
3000         }
3001         if (sectors < my_mddev->dev_sectors)
3002                 return -EINVAL; /* component must fit device */
3003
3004         rdev->sectors = sectors;
3005         if (sectors > oldsectors && my_mddev->external) {
3006                 /* Need to check that all other rdevs with the same
3007                  * ->bdev do not overlap.  'rcu' is sufficient to walk
3008                  * the rdev lists safely.
3009                  * This check does not provide a hard guarantee; it
3010                  * just helps avoid dangerous mistakes.
3011                  */
3012                 struct mddev *mddev;
3013                 int overlap = 0;
3014                 struct list_head *tmp;
3015
3016                 rcu_read_lock();
3017                 for_each_mddev(mddev, tmp) {
3018                         struct md_rdev *rdev2;
3019
3020                         rdev_for_each(rdev2, mddev)
3021                                 if (rdev->bdev == rdev2->bdev &&
3022                                     rdev != rdev2 &&
3023                                     overlaps(rdev->data_offset, rdev->sectors,
3024                                              rdev2->data_offset,
3025                                              rdev2->sectors)) {
3026                                         overlap = 1;
3027                                         break;
3028                                 }
3029                         if (overlap) {
3030                                 mddev_put(mddev);
3031                                 break;
3032                         }
3033                 }
3034                 rcu_read_unlock();
3035                 if (overlap) {
3036                         /* Someone else could have slipped in a size
3037                          * change here, but doing so is just silly.
3038                          * We put oldsectors back because we *know* it is
3039                          * safe, and trust userspace not to race with
3040                          * itself
3041                          */
3042                         rdev->sectors = oldsectors;
3043                         return -EBUSY;
3044                 }
3045         }
3046         return len;
3047 }
3048
3049 static struct rdev_sysfs_entry rdev_size =
3050 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3051
3052 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3053 {
3054         unsigned long long recovery_start = rdev->recovery_offset;
3055
3056         if (test_bit(In_sync, &rdev->flags) ||
3057             recovery_start == MaxSector)
3058                 return sprintf(page, "none\n");
3059
3060         return sprintf(page, "%llu\n", recovery_start);
3061 }
3062
3063 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3064 {
3065         unsigned long long recovery_start;
3066
3067         if (cmd_match(buf, "none"))
3068                 recovery_start = MaxSector;
3069         else if (kstrtoull(buf, 10, &recovery_start))
3070                 return -EINVAL;
3071
3072         if (rdev->mddev->pers &&
3073             rdev->raid_disk >= 0)
3074                 return -EBUSY;
3075
3076         rdev->recovery_offset = recovery_start;
3077         if (recovery_start == MaxSector)
3078                 set_bit(In_sync, &rdev->flags);
3079         else
3080                 clear_bit(In_sync, &rdev->flags);
3081         return len;
3082 }
3083
3084 static struct rdev_sysfs_entry rdev_recovery_start =
3085 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3086
3087 /* sysfs access to bad-blocks list.
3088  * We present two files.
3089  * 'bad_blocks' lists sector numbers and lengths of ranges that
3090  *    are recorded as bad.  The list is truncated to fit within
3091  *    the one-page limit of sysfs.
3092  *    Writing "sector length" to this file adds a bad block to the
3093  *    list and marks it as acknowledged.
3094  * 'unacknowledged_bad_blocks' lists bad blocks that have not yet
3095  *    been acknowledged.  Writing to this file adds bad blocks
3096  *    without acknowledging them.  This is largely for testing.
3097  */
3098 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3099 {
3100         return badblocks_show(&rdev->badblocks, page, 0);
3101 }
3102 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3103 {
3104         int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3105         /* Maybe that ack was all we needed */
3106         if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3107                 wake_up(&rdev->blocked_wait);
3108         return rv;
3109 }
3110 static struct rdev_sysfs_entry rdev_bad_blocks =
3111 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3112
3113 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3114 {
3115         return badblocks_show(&rdev->badblocks, page, 1);
3116 }
3117 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3118 {
3119         return badblocks_store(&rdev->badblocks, page, len, 1);
3120 }
3121 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3122 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
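/*
 * Illustrative usage of the two attributes above (paths assume an array
 * md0 with member device sdb1):
 *
 *   # echo "2048 16" > /sys/block/md0/md/dev-sdb1/bad_blocks
 *
 * records 16 sectors starting at sector 2048 as acknowledged bad blocks,
 * while the same write to 'unacknowledged_bad_blocks' records them
 * without the acknowledged flag.  Reading either file lists the recorded
 * ranges, one per line, truncated to a single page.
 */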
3123
3124 static struct attribute *rdev_default_attrs[] = {
3125         &rdev_state.attr,
3126         &rdev_errors.attr,
3127         &rdev_slot.attr,
3128         &rdev_offset.attr,
3129         &rdev_new_offset.attr,
3130         &rdev_size.attr,
3131         &rdev_recovery_start.attr,
3132         &rdev_bad_blocks.attr,
3133         &rdev_unack_bad_blocks.attr,
3134         NULL,
3135 };
3136 static ssize_t
3137 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3138 {
3139         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3140         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3141
3142         if (!entry->show)
3143                 return -EIO;
3144         if (!rdev->mddev)
3145                 return -EBUSY;
3146         return entry->show(rdev, page);
3147 }
3148
3149 static ssize_t
3150 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3151               const char *page, size_t length)
3152 {
3153         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3154         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3155         ssize_t rv;
3156         struct mddev *mddev = rdev->mddev;
3157
3158         if (!entry->store)
3159                 return -EIO;
3160         if (!capable(CAP_SYS_ADMIN))
3161                 return -EACCES;
3162         rv = mddev ? mddev_lock(mddev): -EBUSY;
3163         if (!rv) {
3164                 if (rdev->mddev == NULL)
3165                         rv = -EBUSY;
3166                 else
3167                         rv = entry->store(rdev, page, length);
3168                 mddev_unlock(mddev);
3169         }
3170         return rv;
3171 }
3172
3173 static void rdev_free(struct kobject *ko)
3174 {
3175         struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3176         kfree(rdev);
3177 }
3178 static const struct sysfs_ops rdev_sysfs_ops = {
3179         .show           = rdev_attr_show,
3180         .store          = rdev_attr_store,
3181 };
3182 static struct kobj_type rdev_ktype = {
3183         .release        = rdev_free,
3184         .sysfs_ops      = &rdev_sysfs_ops,
3185         .default_attrs  = rdev_default_attrs,
3186 };
3187
3188 int md_rdev_init(struct md_rdev *rdev)
3189 {
3190         rdev->desc_nr = -1;
3191         rdev->saved_raid_disk = -1;
3192         rdev->raid_disk = -1;
3193         rdev->flags = 0;
3194         rdev->data_offset = 0;
3195         rdev->new_data_offset = 0;
3196         rdev->sb_events = 0;
3197         rdev->last_read_error = 0;
3198         rdev->sb_loaded = 0;
3199         rdev->bb_page = NULL;
3200         atomic_set(&rdev->nr_pending, 0);
3201         atomic_set(&rdev->read_errors, 0);
3202         atomic_set(&rdev->corrected_errors, 0);
3203
3204         INIT_LIST_HEAD(&rdev->same_set);
3205         init_waitqueue_head(&rdev->blocked_wait);
3206
3207         /* Add space to store bad block list.
3208          * This reserves the space even on arrays where it cannot
3209          * be used - I wonder if that matters
3210          */
3211         return badblocks_init(&rdev->badblocks, 0);
3212 }
3213 EXPORT_SYMBOL_GPL(md_rdev_init);
3214 /*
3215  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3216  *
3217  * mark the device faulty if:
3218  *
3219  *   - the device is nonexistent (zero size)
3220  *   - the device has no valid superblock
3221  *
3222  * a faulty rdev _never_ has rdev->sb set.
3223  */
3224 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3225 {
3226         char b[BDEVNAME_SIZE];
3227         int err;
3228         struct md_rdev *rdev;
3229         sector_t size;
3230
3231         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3232         if (!rdev) {
3233                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
3234                 return ERR_PTR(-ENOMEM);
3235         }
3236
3237         err = md_rdev_init(rdev);
3238         if (err)
3239                 goto abort_free;
3240         err = alloc_disk_sb(rdev);
3241         if (err)
3242                 goto abort_free;
3243
3244         err = lock_rdev(rdev, newdev, super_format == -2);
3245         if (err)
3246                 goto abort_free;
3247
3248         kobject_init(&rdev->kobj, &rdev_ktype);
3249
3250         size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3251         if (!size) {
3252                 printk(KERN_WARNING
3253                         "md: %s has zero or unknown size, marking faulty!\n",
3254                         bdevname(rdev->bdev,b));
3255                 err = -EINVAL;
3256                 goto abort_free;
3257         }
3258
3259         if (super_format >= 0) {
3260                 err = super_types[super_format].
3261                         load_super(rdev, NULL, super_minor);
3262                 if (err == -EINVAL) {
3263                         printk(KERN_WARNING
3264                                 "md: %s does not have a valid v%d.%d "
3265                                "superblock, not importing!\n",
3266                                 bdevname(rdev->bdev,b),
3267                                super_format, super_minor);
3268                         goto abort_free;
3269                 }
3270                 if (err < 0) {
3271                         printk(KERN_WARNING
3272                                 "md: could not read %s's sb, not importing!\n",
3273                                 bdevname(rdev->bdev,b));
3274                         goto abort_free;
3275                 }
3276         }
3277
3278         return rdev;
3279
3280 abort_free:
3281         if (rdev->bdev)
3282                 unlock_rdev(rdev);
3283         md_rdev_clear(rdev);
3284         kfree(rdev);
3285         return ERR_PTR(err);
3286 }
3287
3288 /*
3289  * Check a full RAID array for plausibility
3290  */
3291
3292 static void analyze_sbs(struct mddev *mddev)
3293 {
3294         int i;
3295         struct md_rdev *rdev, *freshest, *tmp;
3296         char b[BDEVNAME_SIZE];
3297
3298         freshest = NULL;
3299         rdev_for_each_safe(rdev, tmp, mddev)
3300                 switch (super_types[mddev->major_version].
3301                         load_super(rdev, freshest, mddev->minor_version)) {
3302                 case 1:
3303                         freshest = rdev;
3304                         break;
3305                 case 0:
3306                         break;
3307                 default:
3308                         printk( KERN_ERR \
3309                                 "md: fatal superblock inconsistency in %s"
3310                                 " -- removing from array\n",
3311                                 bdevname(rdev->bdev,b));
3312                         md_kick_rdev_from_array(rdev);
3313                 }
3314
3315         super_types[mddev->major_version].
3316                 validate_super(mddev, freshest);
3317
3318         i = 0;
3319         rdev_for_each_safe(rdev, tmp, mddev) {
3320                 if (mddev->max_disks &&
3321                     (rdev->desc_nr >= mddev->max_disks ||
3322                      i > mddev->max_disks)) {
3323                         printk(KERN_WARNING
3324                                "md: %s: %s: only %d devices permitted\n",
3325                                mdname(mddev), bdevname(rdev->bdev, b),
3326                                mddev->max_disks);
3327                         md_kick_rdev_from_array(rdev);
3328                         continue;
3329                 }
3330                 if (rdev != freshest) {
3331                         if (super_types[mddev->major_version].
3332                             validate_super(mddev, rdev)) {
3333                                 printk(KERN_WARNING "md: kicking non-fresh %s"
3334                                         " from array!\n",
3335                                         bdevname(rdev->bdev,b));
3336                                 md_kick_rdev_from_array(rdev);
3337                                 continue;
3338                         }
3339                 }
3340                 if (mddev->level == LEVEL_MULTIPATH) {
3341                         rdev->desc_nr = i++;
3342                         rdev->raid_disk = rdev->desc_nr;
3343                         set_bit(In_sync, &rdev->flags);
3344                 } else if (rdev->raid_disk >=
3345                             (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3346                            !test_bit(Journal, &rdev->flags)) {
3347                         rdev->raid_disk = -1;
3348                         clear_bit(In_sync, &rdev->flags);
3349                 }
3350         }
3351 }
3352
3353 /* Read a fixed-point number.
3354  * Numbers in sysfs attributes should be in "standard" units where
3355  * possible, so time should be in seconds.
3356  * However we internally use a much smaller unit such as
3357  * milliseconds or jiffies.
3358  * This function takes a decimal number with a possible fractional
3359  * component, and produces an integer which is the result of
3360  * multiplying that number by 10^'scale', all without any
3361  * floating-point arithmetic.
3362  */
3363 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3364 {
3365         unsigned long result = 0;
3366         long decimals = -1;
3367         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3368                 if (*cp == '.')
3369                         decimals = 0;
3370                 else if (decimals < scale) {
3371                         unsigned int value;
3372                         value = *cp - '0';
3373                         result = result * 10 + value;
3374                         if (decimals >= 0)
3375                                 decimals++;
3376                 }
3377                 cp++;
3378         }
3379         if (*cp == '\n')
3380                 cp++;
3381         if (*cp)
3382                 return -EINVAL;
3383         if (decimals < 0)
3384                 decimals = 0;
3385         while (decimals < scale) {
3386                 result *= 10;
3387                 decimals ++;
3388         }
3389         *res = result;
3390         return 0;
3391 }
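/*
 * Worked examples for strict_strtoul_scaled() (illustrative only):
 *
 *   strict_strtoul_scaled("0.125", &res, 3)  ->  res = 125
 *   strict_strtoul_scaled("20",    &res, 3)  ->  res = 20000
 *   strict_strtoul_scaled("1.5\n", &res, 3)  ->  res = 1500
 *
 * i.e. the decimal string is scaled by 10^scale with any excess
 * fractional digits dropped, and a single trailing newline is accepted.
 */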
3392
3393 static ssize_t
3394 safe_delay_show(struct mddev *mddev, char *page)
3395 {
3396         int msec = (mddev->safemode_delay*1000)/HZ;
3397         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3398 }
3399 static ssize_t
3400 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3401 {
3402         unsigned long msec;
3403
3404         if (mddev_is_clustered(mddev)) {
3405                 pr_info("md: Safemode is disabled for clustered mode\n");
3406                 return -EINVAL;
3407         }
3408
3409         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3410                 return -EINVAL;
3411         if (msec == 0)
3412                 mddev->safemode_delay = 0;
3413         else {
3414                 unsigned long old_delay = mddev->safemode_delay;
3415                 unsigned long new_delay = (msec*HZ)/1000;
3416
3417                 if (new_delay == 0)
3418                         new_delay = 1;
3419                 mddev->safemode_delay = new_delay;
3420                 if (new_delay < old_delay || old_delay == 0)
3421                         mod_timer(&mddev->safemode_timer, jiffies+1);
3422         }
3423         return len;
3424 }
3425 static struct md_sysfs_entry md_safe_delay =
3426 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
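/*
 * Illustrative usage: 'safe_mode_delay' takes seconds with an optional
 * millisecond fraction, converted by strict_strtoul_scaled() above, e.g.
 *
 *   # echo 0.200 > /sys/block/md0/md/safe_mode_delay
 *
 * requests a ~200ms safemode delay (stored internally in jiffies, with
 * a minimum of one jiffy for any non-zero value).
 */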
3427
3428 static ssize_t
3429 level_show(struct mddev *mddev, char *page)
3430 {
3431         struct md_personality *p;
3432         int ret;
3433         spin_lock(&mddev->lock);
3434         p = mddev->pers;
3435         if (p)
3436                 ret = sprintf(page, "%s\n", p->name);
3437         else if (mddev->clevel[0])
3438                 ret = sprintf(page, "%s\n", mddev->clevel);
3439         else if (mddev->level != LEVEL_NONE)
3440                 ret = sprintf(page, "%d\n", mddev->level);
3441         else
3442                 ret = 0;
3443         spin_unlock(&mddev->lock);
3444         return ret;
3445 }
3446
3447 static ssize_t
3448 level_store(struct mddev *mddev, const char *buf, size_t len)
3449 {
3450         char clevel[16];
3451         ssize_t rv;
3452         size_t slen = len;
3453         struct md_personality *pers, *oldpers;
3454         long level;
3455         void *priv, *oldpriv;
3456         struct md_rdev *rdev;
3457
3458         if (slen == 0 || slen >= sizeof(clevel))
3459                 return -EINVAL;
3460
3461         rv = mddev_lock(mddev);
3462         if (rv)
3463                 return rv;
3464
3465         if (mddev->pers == NULL) {
3466                 strncpy(mddev->clevel, buf, slen);
3467                 if (mddev->clevel[slen-1] == '\n')
3468                         slen--;
3469                 mddev->clevel[slen] = 0;
3470                 mddev->level = LEVEL_NONE;
3471                 rv = len;
3472                 goto out_unlock;
3473         }
3474         rv = -EROFS;
3475         if (mddev->ro)
3476                 goto out_unlock;
3477
3478         /* request to change the personality.  Need to ensure:
3479          *  - array is not engaged in resync/recovery/reshape
3480          *  - old personality can be suspended
3481          *  - new personality can take over the array.
3482          */
3483
3484         rv = -EBUSY;
3485         if (mddev->sync_thread ||
3486             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3487             mddev->reshape_position != MaxSector ||
3488             mddev->sysfs_active)
3489                 goto out_unlock;
3490
3491         rv = -EINVAL;
3492         if (!mddev->pers->quiesce) {
3493                 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
3494                        mdname(mddev), mddev->pers->name);
3495                 goto out_unlock;
3496         }
3497
3498         /* Now find the new personality */
3499         strncpy(clevel, buf, slen);
3500         if (clevel[slen-1] == '\n')
3501                 slen--;
3502         clevel[slen] = 0;
3503         if (kstrtol(clevel, 10, &level))
3504                 level = LEVEL_NONE;
3505
3506         if (request_module("md-%s", clevel) != 0)
3507                 request_module("md-level-%s", clevel);
3508         spin_lock(&pers_lock);
3509         pers = find_pers(level, clevel);
3510         if (!pers || !try_module_get(pers->owner)) {
3511                 spin_unlock(&pers_lock);
3512                 printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
3513                 rv = -EINVAL;
3514                 goto out_unlock;
3515         }
3516         spin_unlock(&pers_lock);
3517
3518         if (pers == mddev->pers) {
3519                 /* Nothing to do! */
3520                 module_put(pers->owner);
3521                 rv = len;
3522                 goto out_unlock;
3523         }
3524         if (!pers->takeover) {
3525                 module_put(pers->owner);
3526                 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
3527                        mdname(mddev), clevel);
3528                 rv = -EINVAL;
3529                 goto out_unlock;
3530         }
3531
3532         rdev_for_each(rdev, mddev)
3533                 rdev->new_raid_disk = rdev->raid_disk;
3534
3535         /* ->takeover must set new_* and/or delta_disks
3536          * if it succeeds, and may set them when it fails.
3537          */
3538         priv = pers->takeover(mddev);
3539         if (IS_ERR(priv)) {
3540                 mddev->new_level = mddev->level;
3541                 mddev->new_layout = mddev->layout;
3542                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3543                 mddev->raid_disks -= mddev->delta_disks;
3544                 mddev->delta_disks = 0;
3545                 mddev->reshape_backwards = 0;
3546                 module_put(pers->owner);
3547                 printk(KERN_WARNING "md: %s: %s would not accept array\n",
3548                        mdname(mddev), clevel);
3549                 rv = PTR_ERR(priv);
3550                 goto out_unlock;
3551         }
3552
3553         /* Looks like we have a winner */
3554         mddev_suspend(mddev);
3555         mddev_detach(mddev);
3556
3557         spin_lock(&mddev->lock);
3558         oldpers = mddev->pers;
3559         oldpriv = mddev->private;
3560         mddev->pers = pers;
3561         mddev->private = priv;
3562         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3563         mddev->level = mddev->new_level;
3564         mddev->layout = mddev->new_layout;
3565         mddev->chunk_sectors = mddev->new_chunk_sectors;
3566         mddev->delta_disks = 0;
3567         mddev->reshape_backwards = 0;
3568         mddev->degraded = 0;
3569         spin_unlock(&mddev->lock);
3570
3571         if (oldpers->sync_request == NULL &&
3572             mddev->external) {
3573                 /* We are converting from a no-redundancy array
3574                  * to a redundancy array, and metadata is managed
3575                  * externally, so we need to be sure that writes
3576                  * won't block due to a need to transition
3577                  *      clean->dirty
3578                  * until external management is started.
3579                  */
3580                 mddev->in_sync = 0;
3581                 mddev->safemode_delay = 0;
3582                 mddev->safemode = 0;
3583         }
3584
3585         oldpers->free(mddev, oldpriv);
3586
3587         if (oldpers->sync_request == NULL &&
3588             pers->sync_request != NULL) {
3589                 /* need to add the md_redundancy_group */
3590                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3591                         printk(KERN_WARNING
3592                                "md: cannot register extra attributes for %s\n",
3593                                mdname(mddev));
3594                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
3595         }
3596         if (oldpers->sync_request != NULL &&
3597             pers->sync_request == NULL) {
3598                 /* need to remove the md_redundancy_group */
3599                 if (mddev->to_remove == NULL)
3600                         mddev->to_remove = &md_redundancy_group;
3601         }
3602
3603         module_put(oldpers->owner);
3604
3605         rdev_for_each(rdev, mddev) {
3606                 if (rdev->raid_disk < 0)
3607                         continue;
3608                 if (rdev->new_raid_disk >= mddev->raid_disks)
3609                         rdev->new_raid_disk = -1;
3610                 if (rdev->new_raid_disk == rdev->raid_disk)
3611                         continue;
3612                 sysfs_unlink_rdev(mddev, rdev);
3613         }
3614         rdev_for_each(rdev, mddev) {
3615                 if (rdev->raid_disk < 0)
3616                         continue;
3617                 if (rdev->new_raid_disk == rdev->raid_disk)
3618                         continue;
3619                 rdev->raid_disk = rdev->new_raid_disk;
3620                 if (rdev->raid_disk < 0)
3621                         clear_bit(In_sync, &rdev->flags);
3622                 else {
3623                         if (sysfs_link_rdev(mddev, rdev))
3624                                 printk(KERN_WARNING "md: cannot register rd%d"
3625                                        " for %s after level change\n",
3626                                        rdev->raid_disk, mdname(mddev));
3627                 }
3628         }
3629
3630         if (pers->sync_request == NULL) {
3631                 /* this is now an array without redundancy, so
3632                  * it must always be in_sync
3633                  */
3634                 mddev->in_sync = 1;
3635                 del_timer_sync(&mddev->safemode_timer);
3636         }
3637         blk_set_stacking_limits(&mddev->queue->limits);
3638         pers->run(mddev);
3639         set_bit(MD_CHANGE_DEVS, &mddev->flags);
3640         mddev_resume(mddev);
3641         if (!mddev->thread)
3642                 md_update_sb(mddev, 1);
3643         sysfs_notify(&mddev->kobj, NULL, "level");
3644         md_new_event(mddev);
3645         rv = len;
3646 out_unlock:
3647         mddev_unlock(mddev);
3648         return rv;
3649 }
3650
3651 static struct md_sysfs_entry md_level =
3652 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
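/*
 * Illustrative usage of the 'level' attribute (array name is an example):
 *
 *   # cat /sys/block/md0/md/level
 *   raid0
 *   # echo raid5 > /sys/block/md0/md/level
 *
 * On a running array the write is only accepted when the current
 * personality can be quiesced and the new one provides a takeover()
 * that accepts this shape of array, as level_store() checks above.
 */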
3653
3654 static ssize_t
3655 layout_show(struct mddev *mddev, char *page)
3656 {
3657         /* just a number, not meaningful for all levels */
3658         if (mddev->reshape_position != MaxSector &&
3659             mddev->layout != mddev->new_layout)
3660                 return sprintf(page, "%d (%d)\n",
3661                                mddev->new_layout, mddev->layout);
3662         return sprintf(page, "%d\n", mddev->layout);
3663 }
3664
3665 static ssize_t
3666 layout_store(struct mddev *mddev, const char *buf, size_t len)
3667 {
3668         unsigned int n;
3669         int err;
3670
3671         err = kstrtouint(buf, 10, &n);
3672         if (err < 0)
3673                 return err;
3674         err = mddev_lock(mddev);
3675         if (err)
3676                 return err;
3677
3678         if (mddev->pers) {
3679                 if (mddev->pers->check_reshape == NULL)
3680                         err = -EBUSY;
3681                 else if (mddev->ro)
3682                         err = -EROFS;
3683                 else {
3684                         mddev->new_layout = n;
3685                         err = mddev->pers->check_reshape(mddev);
3686                         if (err)
3687                                 mddev->new_layout = mddev->layout;
3688                 }
3689         } else {
3690                 mddev->new_layout = n;
3691                 if (mddev->reshape_position == MaxSector)
3692                         mddev->layout = n;
3693         }
3694         mddev_unlock(mddev);
3695         return err ?: len;
3696 }
3697 static struct md_sysfs_entry md_layout =
3698 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3699
3700 static ssize_t
3701 raid_disks_show(struct mddev *mddev, char *page)
3702 {
3703         if (mddev->raid_disks == 0)
3704                 return 0;
3705         if (mddev->reshape_position != MaxSector &&
3706             mddev->delta_disks != 0)
3707                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3708                                mddev->raid_disks - mddev->delta_disks);
3709         return sprintf(page, "%d\n", mddev->raid_disks);
3710 }
3711
3712 static int update_raid_disks(struct mddev *mddev, int raid_disks);
3713
3714 static ssize_t
3715 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
3716 {
3717         unsigned int n;
3718         int err;
3719
3720         err = kstrtouint(buf, 10, &n);
3721         if (err < 0)
3722                 return err;
3723
3724         err = mddev_lock(mddev);
3725         if (err)
3726                 return err;
3727         if (mddev->pers)
3728                 err = update_raid_disks(mddev, n);
3729         else if (mddev->reshape_position != MaxSector) {
3730                 struct md_rdev *rdev;
3731                 int olddisks = mddev->raid_disks - mddev->delta_disks;
3732
3733                 err = -EINVAL;
3734                 rdev_for_each(rdev, mddev) {
3735                         if (olddisks < n &&
3736                             rdev->data_offset < rdev->new_data_offset)
3737                                 goto out_unlock;
3738                         if (olddisks > n &&
3739                             rdev->data_offset > rdev->new_data_offset)
3740                                 goto out_unlock;
3741                 }
3742                 err = 0;
3743                 mddev->delta_disks = n - olddisks;
3744                 mddev->raid_disks = n;
3745                 mddev->reshape_backwards = (mddev->delta_disks < 0);
3746         } else
3747                 mddev->raid_disks = n;
3748 out_unlock:
3749         mddev_unlock(mddev);
3750         return err ? err : len;
3751 }
3752 static struct md_sysfs_entry md_raid_disks =
3753 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3754
3755 static ssize_t
3756 chunk_size_show(struct mddev *mddev, char *page)
3757 {
3758         if (mddev->reshape_position != MaxSector &&
3759             mddev->chunk_sectors != mddev->new_chunk_sectors)
3760                 return sprintf(page, "%d (%d)\n",
3761                                mddev->new_chunk_sectors << 9,
3762                                mddev->chunk_sectors << 9);
3763         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3764 }
3765
3766 static ssize_t
3767 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
3768 {
3769         unsigned long n;
3770         int err;
3771
3772         err = kstrtoul(buf, 10, &n);
3773         if (err < 0)
3774                 return err;
3775
3776         err = mddev_lock(mddev);
3777         if (err)
3778                 return err;
3779         if (mddev->pers) {
3780                 if (mddev->pers->check_reshape == NULL)
3781                         err = -EBUSY;
3782                 else if (mddev->ro)
3783                         err = -EROFS;
3784                 else {
3785                         mddev->new_chunk_sectors = n >> 9;
3786                         err = mddev->pers->check_reshape(mddev);
3787                         if (err)
3788                                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3789                 }
3790         } else {
3791                 mddev->new_chunk_sectors = n >> 9;
3792                 if (mddev->reshape_position == MaxSector)
3793                         mddev->chunk_sectors = n >> 9;
3794         }
3795         mddev_unlock(mddev);
3796         return err ?: len;
3797 }
3798 static struct md_sysfs_entry md_chunk_size =
3799 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
3800
3801 static ssize_t
3802 resync_start_show(struct mddev *mddev, char *page)
3803 {
3804         if (mddev->recovery_cp == MaxSector)
3805                 return sprintf(page, "none\n");
3806         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3807 }
3808
3809 static ssize_t
3810 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3811 {
3812         unsigned long long n;
3813         int err;
3814
3815         if (cmd_match(buf, "none"))
3816                 n = MaxSector;
3817         else {
3818                 err = kstrtoull(buf, 10, &n);
3819                 if (err < 0)
3820                         return err;
3821                 if (n != (sector_t)n)
3822                         return -EINVAL;
3823         }
3824
3825         err = mddev_lock(mddev);
3826         if (err)
3827                 return err;
3828         if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3829                 err = -EBUSY;
3830
3831         if (!err) {
3832                 mddev->recovery_cp = n;
3833                 if (mddev->pers)
3834                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3835         }
3836         mddev_unlock(mddev);
3837         return err ?: len;
3838 }
3839 static struct md_sysfs_entry md_resync_start =
3840 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
3841                 resync_start_show, resync_start_store);
3842
3843 /*
3844  * The array state can be:
3845  *
3846  * clear
3847  *     No devices, no size, no level
3848  *     Equivalent to STOP_ARRAY ioctl
3849  * inactive
3850  *     May have some settings, but array is not active
3851  *        all IO results in error
3852  *     When written, doesn't tear down array, but just stops it
3853  * suspended (not supported yet)
3854  *     All IO requests will block. The array can be reconfigured.
3855  *     Writing this, if accepted, will block until array is quiescent
3856  * readonly
3857  *     no resync can happen.  no superblocks get written.
3858  *     write requests fail
3859  * read-auto
3860  *     like readonly, but behaves like 'clean' on a write request.
3861  *
3862  * clean - no pending writes, but otherwise active.
3863  *     When written to an inactive array, starts without resync
3864  *     If a write request arrives then
3865  *       if metadata is known, mark 'dirty' and switch to 'active'.
3866  *       if not known, block and switch to write-pending
3867  *     If written to an active array that has pending writes, then fails.
3868  * active
3869  *     fully active: IO and resync can be happening.
3870  *     When written to an inactive array, starts with resync
3871  *
3872  * write-pending
3873  *     clean, but writes are blocked waiting for 'active' to be written.
3874  *
3875  * active-idle
3876  *     like active, but no writes have been seen for a while (100msec).
3877  *
3878  */
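/*
 * Illustrative usage of 'array_state' (array name is an example):
 *
 *   # cat /sys/block/md0/md/array_state
 *   clean
 *   # echo readonly > /sys/block/md0/md/array_state
 *
 * Writes are matched against the array_states[] table below; the states
 * noted above as unsettable ("write-pending", "active-idle") are
 * reported but cannot be written from userspace.
 */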
3879 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3880                    write_pending, active_idle, bad_word};
3881 static char *array_states[] = {
3882         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3883         "write-pending", "active-idle", NULL };
3884
3885 static int match_word(const char *word, char **list)
3886 {
3887         int n;
3888         for (n=0; list[n]; n++)
3889                 if (cmd_match(word, list[n]))
3890                         break;
3891         return n;
3892 }
3893
3894 static ssize_t
3895 array_state_show(struct mddev *mddev, char *page)
3896 {
3897         enum array_state st = inactive;
3898
3899         if (mddev->pers)
3900                 switch(mddev->ro) {
3901                 case 1:
3902                         st = readonly;
3903                         break;
3904                 case 2:
3905                         st = read_auto;
3906                         break;
3907                 case 0:
3908                         if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3909                                 st = write_pending;
3910                         else if (mddev->in_sync)
3911                                 st = clean;
3912                         else if (mddev->safemode)
3913                                 st = active_idle;
3914                         else
3915                                 st = active;
3916                 }
3917         else {
3918                 if (list_empty(&mddev->disks) &&
3919                     mddev->raid_disks == 0 &&
3920                     mddev->dev_sectors == 0)
3921                         st = clear;
3922                 else
3923                         st = inactive;
3924         }
3925         return sprintf(page, "%s\n", array_states[st]);
3926 }
3927
3928 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
3929 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
3930 static int do_md_run(struct mddev *mddev);
3931 static int restart_array(struct mddev *mddev);
3932
3933 static ssize_t
3934 array_state_store(struct mddev *mddev, const char *buf, size_t len)
3935 {
3936         int err;
3937         enum array_state st = match_word(buf, array_states);
3938
3939         if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
3940                 /* don't take reconfig_mutex when toggling between
3941                  * clean and active
3942                  */
3943                 spin_lock(&mddev->lock);
3944                 if (st == active) {
3945                         restart_array(mddev);
3946                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3947                         wake_up(&mddev->sb_wait);
3948                         err = 0;
3949                 } else /* st == clean */ {
3950                         restart_array(mddev);
3951                         if (atomic_read(&mddev->writes_pending) == 0) {
3952                                 if (mddev->in_sync == 0) {
3953                                         mddev->in_sync = 1;
3954                                         if (mddev->safemode == 1)
3955                                                 mddev->safemode = 0;
3956                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3957                                 }
3958                                 err = 0;
3959                         } else
3960                                 err = -EBUSY;
3961                 }
3962                 if (!err)
3963                         sysfs_notify_dirent_safe(mddev->sysfs_state);
3964                 spin_unlock(&mddev->lock);
3965                 return err ?: len;
3966         }
3967         err = mddev_lock(mddev);
3968         if (err)
3969                 return err;
3970         err = -EINVAL;
3971         switch(st) {
3972         case bad_word:
3973                 break;
3974         case clear:
3975                 /* stopping an active array */
3976                 err = do_md_stop(mddev, 0, NULL);
3977                 break;
3978         case inactive:
3979                 /* stopping an active array */
3980                 if (mddev->pers)
3981                         err = do_md_stop(mddev, 2, NULL);
3982                 else
3983                         err = 0; /* already inactive */
3984                 break;
3985         case suspended:
3986                 break; /* not supported yet */
3987         case readonly:
3988                 if (mddev->pers)
3989                         err = md_set_readonly(mddev, NULL);
3990                 else {
3991                         mddev->ro = 1;
3992                         set_disk_ro(mddev->gendisk, 1);
3993                         err = do_md_run(mddev);
3994                 }
3995                 break;
3996         case read_auto:
3997                 if (mddev->pers) {
3998                         if (mddev->ro == 0)
3999                                 err = md_set_readonly(mddev, NULL);
4000                         else if (mddev->ro == 1)
4001                                 err = restart_array(mddev);
4002                         if (err == 0) {
4003                                 mddev->ro = 2;
4004                                 set_disk_ro(mddev->gendisk, 0);
4005                         }
4006                 } else {
4007                         mddev->ro = 2;
4008                         err = do_md_run(mddev);
4009                 }
4010                 break;
4011         case clean:
4012                 if (mddev->pers) {
4013                         err = restart_array(mddev);
4014                         if (err)
4015                                 break;
4016                         spin_lock(&mddev->lock);
4017                         if (atomic_read(&mddev->writes_pending) == 0) {
4018                                 if (mddev->in_sync == 0) {
4019                                         mddev->in_sync = 1;
4020                                         if (mddev->safemode == 1)
4021                                                 mddev->safemode = 0;
4022                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
4023                                 }
4024                                 err = 0;
4025                         } else
4026                                 err = -EBUSY;
4027                         spin_unlock(&mddev->lock);
4028                 } else
4029                         err = -EINVAL;
4030                 break;
4031         case active:
4032                 if (mddev->pers) {
4033                         err = restart_array(mddev);
4034                         if (err)
4035                                 break;
4036                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
4037                         wake_up(&mddev->sb_wait);
4038                         err = 0;
4039                 } else {
4040                         mddev->ro = 0;
4041                         set_disk_ro(mddev->gendisk, 0);
4042                         err = do_md_run(mddev);
4043                 }
4044                 break;
4045         case write_pending:
4046         case active_idle:
4047                 /* these cannot be set */
4048                 break;
4049         }
4050
4051         if (!err) {
4052                 if (mddev->hold_active == UNTIL_IOCTL)
4053                         mddev->hold_active = 0;
4054                 sysfs_notify_dirent_safe(mddev->sysfs_state);
4055         }
4056         mddev_unlock(mddev);
4057         return err ?: len;
4058 }
4059 static struct md_sysfs_entry md_array_state =
4060 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
4061
4062 static ssize_t
4063 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
4064         return sprintf(page, "%d\n",
4065                        atomic_read(&mddev->max_corr_read_errors));
4066 }
4067
4068 static ssize_t
4069 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4070 {
4071         unsigned int n;
4072         int rv;
4073
4074         rv = kstrtouint(buf, 10, &n);
4075         if (rv < 0)
4076                 return rv;
4077         atomic_set(&mddev->max_corr_read_errors, n);
4078         return len;
4079 }
4080
4081 static struct md_sysfs_entry max_corr_read_errors =
4082 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4083         max_corrected_read_errors_store);
4084
4085 static ssize_t
4086 null_show(struct mddev *mddev, char *page)
4087 {
4088         return -EINVAL;
4089 }
4090
4091 static ssize_t
4092 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4093 {
4094         /* buf must be %d:%d, optionally followed by '\n', giving major and minor numbers */
4095         /* The new device is added to the array.
4096          * If the array has a persistent superblock, we read the
4097          * superblock to initialise info and check validity.
4098          * Otherwise, the only checking done is that in bind_rdev_to_array,
4099          * which mainly checks size.
4100          */
4101         char *e;
4102         int major = simple_strtoul(buf, &e, 10);
4103         int minor;
4104         dev_t dev;
4105         struct md_rdev *rdev;
4106         int err;
4107
4108         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4109                 return -EINVAL;
4110         minor = simple_strtoul(e+1, &e, 10);
4111         if (*e && *e != '\n')
4112                 return -EINVAL;
4113         dev = MKDEV(major, minor);
4114         if (major != MAJOR(dev) ||
4115             minor != MINOR(dev))
4116                 return -EOVERFLOW;
4117
4118         flush_workqueue(md_misc_wq);
4119
4120         err = mddev_lock(mddev);
4121         if (err)
4122                 return err;
4123         if (mddev->persistent) {
4124                 rdev = md_import_device(dev, mddev->major_version,
4125                                         mddev->minor_version);
4126                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4127                         struct md_rdev *rdev0
4128                                 = list_entry(mddev->disks.next,
4129                                              struct md_rdev, same_set);
4130                         err = super_types[mddev->major_version]
4131                                 .load_super(rdev, rdev0, mddev->minor_version);
4132                         if (err < 0)
4133                                 goto out;
4134                 }
4135         } else if (mddev->external)
4136                 rdev = md_import_device(dev, -2, -1);
4137         else
4138                 rdev = md_import_device(dev, -1, -1);
4139
4140         if (IS_ERR(rdev)) {
4141                 mddev_unlock(mddev);
4142                 return PTR_ERR(rdev);
4143         }
4144         err = bind_rdev_to_array(rdev, mddev);
4145  out:
4146         if (err)
4147                 export_rdev(rdev);
4148         mddev_unlock(mddev);
4149         return err ? err : len;
4150 }
4151
4152 static struct md_sysfs_entry md_new_device =
4153 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
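
/*
 * Hot-add sketch for the write-only attribute above, assuming an array
 * named md0 and a spare whose device numbers are 8:16 (typically
 * /dev/sdb):
 *
 *   echo 8:16 > /sys/block/md0/md/new_dev
 *
 * new_dev_store() parses "major:minor", imports the device and binds it
 * to the array, validating the superblock for persistent arrays.
 */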
4154
4155 static ssize_t
4156 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4157 {
4158         char *end;
4159         unsigned long chunk, end_chunk;
4160         int err;
4161
4162         err = mddev_lock(mddev);
4163         if (err)
4164                 return err;
4165         if (!mddev->bitmap)
4166                 goto out;
4167         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4168         while (*buf) {
4169                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4170                 if (buf == end) break;
4171                 if (*end == '-') { /* range */
4172                         buf = end + 1;
4173                         end_chunk = simple_strtoul(buf, &end, 0);
4174                         if (buf == end) break;
4175                 }
4176                 if (*end && !isspace(*end)) break;
4177                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4178                 buf = skip_spaces(end);
4179         }
4180         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4181 out:
4182         mddev_unlock(mddev);
4183         return len;
4184 }
4185
4186 static struct md_sysfs_entry md_bitmap =
4187 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
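
/*
 * Example for bitmap_set_bits, assuming an array md0 that has a
 * write-intent bitmap: chunk numbers, or ranges of chunks, written here
 * are marked dirty in the bitmap:
 *
 *   echo "100 200-300" > /sys/block/md0/md/bitmap_set_bits
 */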
4188
4189 static ssize_t
4190 size_show(struct mddev *mddev, char *page)
4191 {
4192         return sprintf(page, "%llu\n",
4193                 (unsigned long long)mddev->dev_sectors / 2);
4194 }
4195
4196 static int update_size(struct mddev *mddev, sector_t num_sectors);
4197
4198 static ssize_t
4199 size_store(struct mddev *mddev, const char *buf, size_t len)
4200 {
4201         /* If the array is inactive, we can reduce the component size, but
4202          * not increase it (except from 0).
4203          * If the array is active, we can try an on-line resize.
4204          */
4205         sector_t sectors;
4206         int err = strict_blocks_to_sectors(buf, &sectors);
4207
4208         if (err < 0)
4209                 return err;
4210         err = mddev_lock(mddev);
4211         if (err)
4212                 return err;
4213         if (mddev->pers) {
4214                 err = update_size(mddev, sectors);
4215                 if (err == 0)
4216                         md_update_sb(mddev, 1);
4217         } else {
4218                 if (mddev->dev_sectors == 0 ||
4219                     mddev->dev_sectors > sectors)
4220                         mddev->dev_sectors = sectors;
4221                 else
4222                         err = -ENOSPC;
4223         }
4224         mddev_unlock(mddev);
4225         return err ? err : len;
4226 }
4227
4228 static struct md_sysfs_entry md_size =
4229 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
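
/*
 * component_size is shown and accepted in KiB (note the dev_sectors / 2
 * above and strict_blocks_to_sectors() in the store path).  Example,
 * assuming an array named md0:
 *
 *   cat /sys/block/md0/md/component_size
 *   echo 10485760 > /sys/block/md0/md/component_size	# 10 GiB per device
 */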
4230
4231 /* Metadata version.
4232  * This is one of
4233  *   'none' for arrays with no metadata (good luck...)
4234  *   'external' for arrays with externally managed metadata,
4235  * or N.M for internally known formats
4236  */
4237 static ssize_t
4238 metadata_show(struct mddev *mddev, char *page)
4239 {
4240         if (mddev->persistent)
4241                 return sprintf(page, "%d.%d\n",
4242                                mddev->major_version, mddev->minor_version);
4243         else if (mddev->external)
4244                 return sprintf(page, "external:%s\n", mddev->metadata_type);
4245         else
4246                 return sprintf(page, "none\n");
4247 }
4248
4249 static ssize_t
4250 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4251 {
4252         int major, minor;
4253         char *e;
4254         int err;
4255         /* Changing the details of 'external' metadata is
4256          * always permitted.  Otherwise there must be
4257          * no devices attached to the array.
4258          */
4259
4260         err = mddev_lock(mddev);
4261         if (err)
4262                 return err;
4263         err = -EBUSY;
4264         if (mddev->external && strncmp(buf, "external:", 9) == 0)
4265                 ;
4266         else if (!list_empty(&mddev->disks))
4267                 goto out_unlock;
4268
4269         err = 0;
4270         if (cmd_match(buf, "none")) {
4271                 mddev->persistent = 0;
4272                 mddev->external = 0;
4273                 mddev->major_version = 0;
4274                 mddev->minor_version = 90;
4275                 goto out_unlock;
4276         }
4277         if (strncmp(buf, "external:", 9) == 0) {
4278                 size_t namelen = len-9;
4279                 if (namelen >= sizeof(mddev->metadata_type))
4280                         namelen = sizeof(mddev->metadata_type)-1;
4281                 strncpy(mddev->metadata_type, buf+9, namelen);
4282                 mddev->metadata_type[namelen] = 0;
4283                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4284                         mddev->metadata_type[--namelen] = 0;
4285                 mddev->persistent = 0;
4286                 mddev->external = 1;
4287                 mddev->major_version = 0;
4288                 mddev->minor_version = 90;
4289                 goto out_unlock;
4290         }
4291         major = simple_strtoul(buf, &e, 10);
4292         err = -EINVAL;
4293         if (e==buf || *e != '.')
4294                 goto out_unlock;
4295         buf = e+1;
4296         minor = simple_strtoul(buf, &e, 10);
4297         if (e==buf || (*e && *e != '\n') )
4298                 goto out_unlock;
4299         err = -ENOENT;
4300         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4301                 goto out_unlock;
4302         mddev->major_version = major;
4303         mddev->minor_version = minor;
4304         mddev->persistent = 1;
4305         mddev->external = 0;
4306         err = 0;
4307 out_unlock:
4308         mddev_unlock(mddev);
4309         return err ?: len;
4310 }
4311
4312 static struct md_sysfs_entry md_metadata =
4313 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
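
/*
 * Example values accepted by metadata_version (a sketch, assuming an
 * array named md0 with no member devices attached yet):
 *
 *   echo 1.2  > /sys/block/md0/md/metadata_version
 *   echo none > /sys/block/md0/md/metadata_version
 *   echo external:imsm > /sys/block/md0/md/metadata_version
 */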
4314
4315 static ssize_t
4316 action_show(struct mddev *mddev, char *page)
4317 {
4318         char *type = "idle";
4319         unsigned long recovery = mddev->recovery;
4320         if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4321                 type = "frozen";
4322         else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4323             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4324                 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4325                         type = "reshape";
4326                 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4327                         if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4328                                 type = "resync";
4329                         else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4330                                 type = "check";
4331                         else
4332                                 type = "repair";
4333                 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4334                         type = "recover";
4335                 else if (mddev->reshape_position != MaxSector)
4336                         type = "reshape";
4337         }
4338         return sprintf(page, "%s\n", type);
4339 }
4340
4341 static ssize_t
4342 action_store(struct mddev *mddev, const char *page, size_t len)
4343 {
4344         if (!mddev->pers || !mddev->pers->sync_request)
4345                 return -EINVAL;
4346
4347
4348         if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4349                 if (cmd_match(page, "frozen"))
4350                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4351                 else
4352                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4353                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4354                     mddev_lock(mddev) == 0) {
4355                         flush_workqueue(md_misc_wq);
4356                         if (mddev->sync_thread) {
4357                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4358                                 md_reap_sync_thread(mddev);
4359                         }
4360                         mddev_unlock(mddev);
4361                 }
4362         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4363                 return -EBUSY;
4364         else if (cmd_match(page, "resync"))
4365                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4366         else if (cmd_match(page, "recover")) {
4367                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4368                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4369         } else if (cmd_match(page, "reshape")) {
4370                 int err;
4371                 if (mddev->pers->start_reshape == NULL)
4372                         return -EINVAL;
4373                 err = mddev_lock(mddev);
4374                 if (!err) {
4375                         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4376                                 err =  -EBUSY;
4377                         else {
4378                                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4379                                 err = mddev->pers->start_reshape(mddev);
4380                         }
4381                         mddev_unlock(mddev);
4382                 }
4383                 if (err)
4384                         return err;
4385                 sysfs_notify(&mddev->kobj, NULL, "degraded");
4386         } else {
4387                 if (cmd_match(page, "check"))
4388                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4389                 else if (!cmd_match(page, "repair"))
4390                         return -EINVAL;
4391                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4392                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4393                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4394         }
4395         if (mddev->ro == 2) {
4396                 /* A write to sync_action is enough to justify
4397                  * canceling read-auto mode
4398                  */
4399                 mddev->ro = 0;
4400                 md_wakeup_thread(mddev->sync_thread);
4401         }
4402         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4403         md_wakeup_thread(mddev->thread);
4404         sysfs_notify_dirent_safe(mddev->sysfs_action);
4405         return len;
4406 }
4407
4408 static struct md_sysfs_entry md_scan_mode =
4409 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
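
/*
 * Example use of sync_action, assuming an array named md0.  The values
 * handled by action_store() above are "idle", "frozen", "resync",
 * "recover", "reshape", "check" and "repair":
 *
 *   echo check > /sys/block/md0/md/sync_action	# start a scrub pass
 *   echo idle  > /sys/block/md0/md/sync_action	# interrupt it again
 */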
4410
4411 static ssize_t
4412 last_sync_action_show(struct mddev *mddev, char *page)
4413 {
4414         return sprintf(page, "%s\n", mddev->last_sync_action);
4415 }
4416
4417 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4418
4419 static ssize_t
4420 mismatch_cnt_show(struct mddev *mddev, char *page)
4421 {
4422         return sprintf(page, "%llu\n",
4423                        (unsigned long long)
4424                        atomic64_read(&mddev->resync_mismatches));
4425 }
4426
4427 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4428
4429 static ssize_t
4430 sync_min_show(struct mddev *mddev, char *page)
4431 {
4432         return sprintf(page, "%d (%s)\n", speed_min(mddev),
4433                        mddev->sync_speed_min ? "local": "system");
4434 }
4435
4436 static ssize_t
4437 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4438 {
4439         unsigned int min;
4440         int rv;
4441
4442         if (strncmp(buf, "system", 6)==0) {
4443                 min = 0;
4444         } else {
4445                 rv = kstrtouint(buf, 10, &min);
4446                 if (rv < 0)
4447                         return rv;
4448                 if (min == 0)
4449                         return -EINVAL;
4450         }
4451         mddev->sync_speed_min = min;
4452         return len;
4453 }
4454
4455 static struct md_sysfs_entry md_sync_min =
4456 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4457
4458 static ssize_t
4459 sync_max_show(struct mddev *mddev, char *page)
4460 {
4461         return sprintf(page, "%d (%s)\n", speed_max(mddev),
4462                        mddev->sync_speed_max ? "local": "system");
4463 }
4464
4465 static ssize_t
4466 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4467 {
4468         unsigned int max;
4469         int rv;
4470
4471         if (strncmp(buf, "system", 6)==0) {
4472                 max = 0;
4473         } else {
4474                 rv = kstrtouint(buf, 10, &max);
4475                 if (rv < 0)
4476                         return rv;
4477                 if (max == 0)
4478                         return -EINVAL;
4479         }
4480         mddev->sync_speed_max = max;
4481         return len;
4482 }
4483
4484 static struct md_sysfs_entry md_sync_max =
4485 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
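
/*
 * The two limits above are in KiB/sec; reading shows whether the array
 * uses its own value ("local") or the system-wide default ("system").
 * Example, assuming an array named md0:
 *
 *   echo 100000 > /sys/block/md0/md/sync_speed_max
 *   echo system > /sys/block/md0/md/sync_speed_min
 */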
4486
4487 static ssize_t
4488 degraded_show(struct mddev *mddev, char *page)
4489 {
4490         return sprintf(page, "%d\n", mddev->degraded);
4491 }
4492 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
4493
4494 static ssize_t
4495 sync_force_parallel_show(struct mddev *mddev, char *page)
4496 {
4497         return sprintf(page, "%d\n", mddev->parallel_resync);
4498 }
4499
4500 static ssize_t
4501 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
4502 {
4503         long n;
4504
4505         if (kstrtol(buf, 10, &n))
4506                 return -EINVAL;
4507
4508         if (n != 0 && n != 1)
4509                 return -EINVAL;
4510
4511         mddev->parallel_resync = n;
4512
4513         if (mddev->sync_thread)
4514                 wake_up(&resync_wait);
4515
4516         return len;
4517 }
4518
4519 /* force parallel resync, even with shared block devices */
4520 static struct md_sysfs_entry md_sync_force_parallel =
4521 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4522        sync_force_parallel_show, sync_force_parallel_store);
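
/*
 * Example, assuming arrays that share physical disks but should still
 * resync at the same time:
 *
 *   echo 1 > /sys/block/md0/md/sync_force_parallel
 */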
4523
4524 static ssize_t
4525 sync_speed_show(struct mddev *mddev, char *page)
4526 {
4527         unsigned long resync, dt, db;
4528         if (mddev->curr_resync == 0)
4529                 return sprintf(page, "none\n");
4530         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
4531         dt = (jiffies - mddev->resync_mark) / HZ;
4532         if (!dt) dt++;
4533         db = resync - mddev->resync_mark_cnt;
4534         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
4535 }
4536
4537 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
4538
4539 static ssize_t
4540 sync_completed_show(struct mddev *mddev, char *page)
4541 {
4542         unsigned long long max_sectors, resync;
4543
4544         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4545                 return sprintf(page, "none\n");
4546
4547         if (mddev->curr_resync == 1 ||
4548             mddev->curr_resync == 2)
4549                 return sprintf(page, "delayed\n");
4550
4551         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4552             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4553                 max_sectors = mddev->resync_max_sectors;
4554         else
4555                 max_sectors = mddev->dev_sectors;
4556
4557         resync = mddev->curr_resync_completed;
4558         return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4559 }
4560
4561 static struct md_sysfs_entry md_sync_completed =
4562         __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
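
/*
 * sync_completed reports progress as "<done> / <total>" in sectors, or
 * "none" / "delayed" when no resync is active, exactly as produced by
 * sync_completed_show() above.  Example, assuming an array named md0:
 *
 *   cat /sys/block/md0/md/sync_completed
 */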
4563
4564 static ssize_t
4565 min_sync_show(struct mddev *mddev, char *page)
4566 {
4567         return sprintf(page, "%llu\n",
4568                        (unsigned long long)mddev->resync_min);
4569 }
4570 static ssize_t
4571 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
4572 {
4573         unsigned long long min;
4574         int err;
4575
4576         if (kstrtoull(buf, 10, &min))
4577                 return -EINVAL;
4578
4579         spin_lock(&mddev->lock);
4580         err = -EINVAL;
4581         if (min > mddev->resync_max)
4582                 goto out_unlock;
4583
4584         err = -EBUSY;
4585         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4586                 goto out_unlock;
4587
4588         /* Round down to multiple of 4K for safety */
4589         mddev->resync_min = round_down(min, 8);
4590         err = 0;
4591
4592 out_unlock:
4593         spin_unlock(&mddev->lock);
4594         return err ?: len;
4595 }
4596
4597 static struct md_sysfs_entry md_min_sync =
4598 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
4599
4600 static ssize_t
4601 max_sync_show(struct mddev *mddev, char *page)
4602 {
4603         if (mddev->resync_max == MaxSector)
4604                 return sprintf(page, "max\n");
4605         else
4606                 return sprintf(page, "%llu\n",
4607                                (unsigned long long)mddev->resync_max);
4608 }
4609 static ssize_t
4610 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
4611 {
4612         int err;
4613         spin_lock(&mddev->lock);
4614         if (strncmp(buf, "max", 3) == 0)
4615                 mddev->resync_max = MaxSector;
4616         else {
4617                 unsigned long long max;
4618                 int chunk;
4619
4620                 err = -EINVAL;
4621                 if (kstrtoull(buf, 10, &max))
4622                         goto out_unlock;
4623                 if (max < mddev->resync_min)
4624                         goto out_unlock;
4625
4626                 err = -EBUSY;
4627                 if (max < mddev->resync_max &&
4628                     mddev->ro == 0 &&
4629                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4630                         goto out_unlock;
4631
4632                 /* Must be a multiple of chunk_size */
4633                 chunk = mddev->chunk_sectors;
4634                 if (chunk) {
4635                         sector_t temp = max;
4636
4637                         err = -EINVAL;
4638                         if (sector_div(temp, chunk))
4639                                 goto out_unlock;
4640                 }
4641                 mddev->resync_max = max;
4642         }
4643         wake_up(&mddev->recovery_wait);
4644         err = 0;
4645 out_unlock:
4646         spin_unlock(&mddev->lock);
4647         return err ?: len;
4648 }
4649
4650 static struct md_sysfs_entry md_max_sync =
4651 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
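
/*
 * sync_min and sync_max bound the sector range a resync will cover;
 * sync_max also accepts the word "max".  Example sketch, assuming an
 * array md0, to check only the first GiB (sectors are 512 bytes):
 *
 *   echo 0       > /sys/block/md0/md/sync_min
 *   echo 2097152 > /sys/block/md0/md/sync_max
 *   echo check   > /sys/block/md0/md/sync_action
 */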
4652
4653 static ssize_t
4654 suspend_lo_show(struct mddev *mddev, char *page)
4655 {
4656         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
4657 }
4658
4659 static ssize_t
4660 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
4661 {
4662         unsigned long long old, new;
4663         int err;
4664
4665         err = kstrtoull(buf, 10, &new);
4666         if (err < 0)
4667                 return err;
4668         if (new != (sector_t)new)
4669                 return -EINVAL;
4670
4671         err = mddev_lock(mddev);
4672         if (err)
4673                 return err;
4674         err = -EINVAL;
4675         if (mddev->pers == NULL ||
4676             mddev->pers->quiesce == NULL)
4677                 goto unlock;
4678         old = mddev->suspend_lo;
4679         mddev->suspend_lo = new;
4680         if (new >= old)
4681                 /* Shrinking suspended region */
4682                 mddev->pers->quiesce(mddev, 2);
4683         else {
4684                 /* Expanding suspended region - need to wait */
4685                 mddev->pers->quiesce(mddev, 1);
4686                 mddev->pers->quiesce(mddev, 0);
4687         }
4688         err = 0;
4689 unlock:
4690         mddev_unlock(mddev);
4691         return err ?: len;
4692 }
4693 static struct md_sysfs_entry md_suspend_lo =
4694 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
4695
4696 static ssize_t
4697 suspend_hi_show(struct mddev *mddev, char *page)
4698 {
4699         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
4700 }
4701
4702 static ssize_t
4703 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
4704 {
4705         unsigned long long old, new;
4706         int err;
4707
4708         err = kstrtoull(buf, 10, &new);
4709         if (err < 0)
4710                 return err;
4711         if (new != (sector_t)new)
4712                 return -EINVAL;
4713
4714         err = mddev_lock(mddev);
4715         if (err)
4716                 return err;
4717         err = -EINVAL;
4718         if (mddev->pers == NULL ||
4719             mddev->pers->quiesce == NULL)
4720                 goto unlock;
4721         old = mddev->suspend_hi;
4722         mddev->suspend_hi = new;
4723         if (new <= old)
4724                 /* Shrinking suspended region */
4725                 mddev->pers->quiesce(mddev, 2);
4726         else {
4727                 /* Expanding suspended region - need to wait */
4728                 mddev->pers->quiesce(mddev, 1);
4729                 mddev->pers->quiesce(mddev, 0);
4730         }
4731         err = 0;
4732 unlock:
4733         mddev_unlock(mddev);
4734         return err ?: len;
4735 }
4736 static struct md_sysfs_entry md_suspend_hi =
4737 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
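
/*
 * suspend_lo and suspend_hi (both in sectors) describe a region of the
 * array in which I/O is temporarily held off; the personality's quiesce
 * method is used above when the region grows.  Example, assuming an
 * array named md0:
 *
 *   echo 0      > /sys/block/md0/md/suspend_lo
 *   echo 204800 > /sys/block/md0/md/suspend_hi	# suspend the first 100 MiB
 */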
4738
4739 static ssize_t
4740 reshape_position_show(struct mddev *mddev, char *page)
4741 {
4742         if (mddev->reshape_position != MaxSector)
4743                 return sprintf(page, "%llu\n",
4744                                (unsigned long long)mddev->reshape_position);
4745         strcpy(page, "none\n");
4746         return 5;
4747 }
4748
4749 static ssize_t
4750 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
4751 {
4752         struct md_rdev *rdev;
4753         unsigned long long new;
4754         int err;
4755
4756         err = kstrtoull(buf, 10, &new);
4757         if (err < 0)
4758                 return err;
4759         if (new != (sector_t)new)
4760                 return -EINVAL;
4761         err = mddev_lock(mddev);
4762         if (err)
4763                 return err;
4764         err = -EBUSY;
4765         if (mddev->pers)
4766                 goto unlock;
4767         mddev->reshape_position = new;
4768         mddev->delta_disks = 0;
4769         mddev->reshape_backwards = 0;
4770         mddev->new_level = mddev->level;
4771         mddev->new_layout = mddev->layout;
4772         mddev->new_chunk_sectors = mddev->chunk_sectors;
4773         rdev_for_each(rdev, mddev)
4774                 rdev->new_data_offset = rdev->data_offset;
4775         err = 0;
4776 unlock:
4777         mddev_unlock(mddev);
4778         return err ?: len;
4779 }
4780
4781 static struct md_sysfs_entry md_reshape_position =
4782 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4783        reshape_position_store);
4784
4785 static ssize_t
4786 reshape_direction_show(struct mddev *mddev, char *page)
4787 {
4788         return sprintf(page, "%s\n",
4789                        mddev->reshape_backwards ? "backwards" : "forwards");
4790 }
4791
4792 static ssize_t
4793 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
4794 {
4795         int backwards = 0;
4796         int err;
4797
4798         if (cmd_match(buf, "forwards"))
4799                 backwards = 0;
4800         else if (cmd_match(buf, "backwards"))
4801                 backwards = 1;
4802         else
4803                 return -EINVAL;
4804         if (mddev->reshape_backwards == backwards)
4805                 return len;
4806
4807         err = mddev_lock(mddev);
4808         if (err)
4809                 return err;
4810         /* check if we are allowed to change */
4811         if (mddev->delta_disks)
4812                 err = -EBUSY;
4813         else if (mddev->persistent &&
4814             mddev->major_version == 0)
4815                 err =  -EINVAL;
4816         else
4817                 mddev->reshape_backwards = backwards;
4818         mddev_unlock(mddev);
4819         return err ?: len;
4820 }
4821
4822 static struct md_sysfs_entry md_reshape_direction =
4823 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
4824        reshape_direction_store);
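
/*
 * reshape_position can only be set while the array is not active, and
 * reshape_direction refuses to change while a delta_disks change is
 * pending, as checked above.  Example sketch, assuming a stopped array
 * md0 whose reshape must be restarted from a known position:
 *
 *   echo 12345678  > /sys/block/md0/md/reshape_position
 *   echo backwards > /sys/block/md0/md/reshape_direction
 */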
4825
4826 static ssize_t
4827 array_size_show(struct mddev *mddev, char *page)
4828 {
4829         if (mddev->external_size)
4830                 return sprintf(page, "%llu\n",
4831                                (unsigned long long)mddev->array_sectors/2);
4832         else
4833                 return sprintf(page, "default\n");
4834 }
4835
4836 static ssize_t
4837 array_size_store(struct mddev *mddev, const char *buf, size_t len)
4838 {
4839         sector_t sectors;
4840         int err;
4841
4842         err = mddev_lock(mddev);
4843         if (err)
4844                 return err;
4845
4846         /* cluster raid doesn't support changing array_sectors */
4847         if (mddev_is_clustered(mddev)) {
4848                 mddev_unlock(mddev);
4849                 return -EINVAL;
4850         }
4851
4852         if (strncmp(buf, "default", 7) == 0) {
4853                 if (mddev->pers)
4854                         sectors = mddev->pers->size(mddev, 0, 0);
4855                 else
4856                         sectors = mddev->array_sectors;
4857
4858                 mddev->external_size = 0;
4859         } else {
4860                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4861                         err = -EINVAL;
4862                 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4863                         err = -E2BIG;
4864                 else
4865                         mddev->external_size = 1;
4866         }
4867
4868         if (!err) {
4869                 mddev->array_sectors = sectors;
4870                 if (mddev->pers) {
4871                         set_capacity(mddev->gendisk, mddev->array_sectors);
4872                         revalidate_disk(mddev->gendisk);
4873                 }
4874         }
4875         mddev_unlock(mddev);
4876         return err ?: len;
4877 }
4878
4879 static struct md_sysfs_entry md_array_size =
4880 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4881        array_size_store);
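
/*
 * array_size accepts either "default" (let the personality compute the
 * exported size) or an explicit size in KiB, which may not exceed what
 * the personality reports.  Example, assuming an array named md0:
 *
 *   echo default > /sys/block/md0/md/array_size
 *   echo 5242880 > /sys/block/md0/md/array_size	# 5 GiB
 */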
4882
4883 static struct attribute *md_default_attrs[] = {
4884         &md_level.attr,
4885         &md_layout.attr,
4886         &md_raid_disks.attr,
4887         &md_chunk_size.attr,
4888         &md_size.attr,
4889         &md_resync_start.attr,
4890         &md_metadata.attr,
4891         &md_new_device.attr,
4892         &md_safe_delay.attr,
4893         &md_array_state.attr,
4894         &md_reshape_position.attr,
4895         &md_reshape_direction.attr,
4896         &md_array_size.attr,
4897         &max_corr_read_errors.attr,
4898         NULL,
4899 };
4900
4901 static struct attribute *md_redundancy_attrs[] = {
4902         &md_scan_mode.attr,
4903         &md_last_scan_mode.attr,
4904         &md_mismatches.attr,
4905         &md_sync_min.attr,
4906         &md_sync_max.attr,
4907         &md_sync_speed.attr,
4908         &md_sync_force_parallel.attr,
4909         &md_sync_completed.attr,
4910         &md_min_sync.attr,
4911         &md_max_sync.attr,
4912         &md_suspend_lo.attr,
4913         &md_suspend_hi.attr,
4914         &md_bitmap.attr,
4915         &md_degraded.attr,
4916         NULL,
4917 };
4918 static struct attribute_group md_redundancy_group = {
4919         .name = NULL,
4920         .attrs = md_redundancy_attrs,
4921 };
4922
4923 static ssize_t
4924 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4925 {
4926         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4927         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4928         ssize_t rv;
4929
4930         if (!entry->show)
4931                 return -EIO;
4932         spin_lock(&all_mddevs_lock);
4933         if (list_empty(&mddev->all_mddevs)) {
4934                 spin_unlock(&all_mddevs_lock);
4935                 return -EBUSY;
4936         }
4937         mddev_get(mddev);
4938         spin_unlock(&all_mddevs_lock);
4939
4940         rv = entry->show(mddev, page);
4941         mddev_put(mddev);
4942         return rv;
4943 }
4944
4945 static ssize_t
4946 md_attr_store(struct kobject *kobj, struct attribute *attr,
4947               const char *page, size_t length)
4948 {
4949         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4950         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4951         ssize_t rv;
4952
4953         if (!entry->store)
4954                 return -EIO;
4955         if (!capable(CAP_SYS_ADMIN))
4956                 return -EACCES;
4957         spin_lock(&all_mddevs_lock);
4958         if (list_empty(&mddev->all_mddevs)) {
4959                 spin_unlock(&all_mddevs_lock);
4960                 return -EBUSY;
4961         }
4962         mddev_get(mddev);
4963         spin_unlock(&all_mddevs_lock);
4964         rv = entry->store(mddev, page, length);
4965         mddev_put(mddev);
4966         return rv;
4967 }
4968
4969 static void md_free(struct kobject *ko)
4970 {
4971         struct mddev *mddev = container_of(ko, struct mddev, kobj);
4972
4973         if (mddev->sysfs_state)
4974                 sysfs_put(mddev->sysfs_state);
4975
4976         if (mddev->queue)
4977                 blk_cleanup_queue(mddev->queue);
4978         if (mddev->gendisk) {
4979                 del_gendisk(mddev->gendisk);
4980                 put_disk(mddev->gendisk);
4981         }
4982
4983         kfree(mddev);
4984 }
4985
4986 static const struct sysfs_ops md_sysfs_ops = {
4987         .show   = md_attr_show,
4988         .store  = md_attr_store,
4989 };
4990 static struct kobj_type md_ktype = {
4991         .release        = md_free,
4992         .sysfs_ops      = &md_sysfs_ops,
4993         .default_attrs  = md_default_attrs,
4994 };
4995
4996 int mdp_major = 0;
4997
4998 static void mddev_delayed_delete(struct work_struct *ws)
4999 {
5000         struct mddev *mddev = container_of(ws, struct mddev, del_work);
5001
5002         sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
5003         kobject_del(&mddev->kobj);
5004         kobject_put(&mddev->kobj);
5005 }
5006
5007 static int md_alloc(dev_t dev, char *name)
5008 {
5009         static DEFINE_MUTEX(disks_mutex);
5010         struct mddev *mddev = mddev_find(dev);
5011         struct gendisk *disk;
5012         int partitioned;
5013         int shift;
5014         int unit;
5015         int error;
5016
5017         if (!mddev)
5018                 return -ENODEV;
5019
5020         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5021         shift = partitioned ? MdpMinorShift : 0;
5022         unit = MINOR(mddev->unit) >> shift;
5023
5024         /* wait for any previous instance of this device to be
5025          * completely removed (mddev_delayed_delete).
5026          */
5027         flush_workqueue(md_misc_wq);
5028
5029         mutex_lock(&disks_mutex);
5030         error = -EEXIST;
5031         if (mddev->gendisk)
5032                 goto abort;
5033
5034         if (name) {
5035                 /* Need to ensure that 'name' is not a duplicate.
5036                  */
5037                 struct mddev *mddev2;
5038                 spin_lock(&all_mddevs_lock);
5039
5040                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5041                         if (mddev2->gendisk &&
5042                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
5043                                 spin_unlock(&all_mddevs_lock);
5044                                 goto abort;
5045                         }
5046                 spin_unlock(&all_mddevs_lock);
5047         }
5048
5049         error = -ENOMEM;
5050         mddev->queue = blk_alloc_queue(GFP_KERNEL);
5051         if (!mddev->queue)
5052                 goto abort;
5053         mddev->queue->queuedata = mddev;
5054
5055         blk_queue_make_request(mddev->queue, md_make_request);
5056         blk_set_stacking_limits(&mddev->queue->limits);
5057
5058         disk = alloc_disk(1 << shift);
5059         if (!disk) {
5060                 blk_cleanup_queue(mddev->queue);
5061                 mddev->queue = NULL;
5062                 goto abort;
5063         }
5064         disk->major = MAJOR(mddev->unit);
5065         disk->first_minor = unit << shift;
5066         if (name)
5067                 strcpy(disk->disk_name, name);
5068         else if (partitioned)
5069                 sprintf(disk->disk_name, "md_d%d", unit);
5070         else
5071                 sprintf(disk->disk_name, "md%d", unit);
5072         disk->fops = &md_fops;
5073         disk->private_data = mddev;
5074         disk->queue = mddev->queue;
5075         blk_queue_write_cache(mddev->queue, true, true);
5076         /* Allow extended partitions.  This makes the
5077          * 'mdp' device redundant, but we can't really
5078          * remove it now.
5079          */
5080         disk->flags |= GENHD_FL_EXT_DEVT;
5081         mddev->gendisk = disk;
5082         /* As soon as we call add_disk(), another thread could get
5083          * through to md_open, so make sure it doesn't get too far
5084          */
5085         mutex_lock(&mddev->open_mutex);
5086         add_disk(disk);
5087
5088         error = kobject_init_and_add(&mddev->kobj, &md_ktype,
5089                                      &disk_to_dev(disk)->kobj, "%s", "md");
5090         if (error) {
5091                 /* This isn't possible, but as kobject_init_and_add is marked
5092                  * __must_check, we must do something with the result
5093                  */
5094                 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
5095                        disk->disk_name);
5096                 error = 0;
5097         }
5098         if (mddev->kobj.sd &&
5099             sysfs_create_group(&mddev->kobj, &md_bitmap_group))
5100                 printk(KERN_DEBUG "md: cannot register bitmap group for %s\n", disk->disk_name);
5101         mutex_unlock(&mddev->open_mutex);
5102  abort:
5103         mutex_unlock(&disks_mutex);
5104         if (!error && mddev->kobj.sd) {
5105                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
5106                 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5107         }
5108         mddev_put(mddev);
5109         return error;
5110 }
5111
5112 static struct kobject *md_probe(dev_t dev, int *part, void *data)
5113 {
5114         md_alloc(dev, NULL);
5115         return NULL;
5116 }
5117
5118 static int add_named_array(const char *val, struct kernel_param *kp)
5119 {
5120         /* val must be "md_*" where * is not all digits.
5121          * We allocate an array with a large free minor number, and
5122          * set the name to val.  val must not already be an active name.
5123          */
5124         int len = strlen(val);
5125         char buf[DISK_NAME_LEN];
5126
5127         while (len && val[len-1] == '\n')
5128                 len--;
5129         if (len >= DISK_NAME_LEN)
5130                 return -E2BIG;
5131         strlcpy(buf, val, len+1);
5132         if (strncmp(buf, "md_", 3) != 0)
5133                 return -EINVAL;
5134         return md_alloc(0, buf);
5135 }
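
/*
 * add_named_array() backs a writable module parameter (registered near
 * the end of this file); a sketch of creating a named array from user
 * space, assuming the parameter is exposed as "new_array":
 *
 *   echo md_home > /sys/module/md_mod/parameters/new_array
 *
 * which allocates a new array device named "md_home" with a free minor.
 */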
5136
5137 static void md_safemode_timeout(unsigned long data)
5138 {
5139         struct mddev *mddev = (struct mddev *) data;
5140
5141         if (!atomic_read(&mddev->writes_pending)) {
5142                 mddev->safemode = 1;
5143                 if (mddev->external)
5144                         sysfs_notify_dirent_safe(mddev->sysfs_state);
5145         }
5146         md_wakeup_thread(mddev->thread);
5147 }
5148
5149 static int start_dirty_degraded;
5150
5151 int md_run(struct mddev *mddev)
5152 {
5153         int err;
5154         struct md_rdev *rdev;
5155         struct md_personality *pers;
5156
5157         if (list_empty(&mddev->disks))
5158                 /* cannot run an array with no devices. */
5159                 return -EINVAL;
5160
5161         if (mddev->pers)
5162                 return -EBUSY;
5163         /* Cannot run until previous stop completes properly */
5164         if (mddev->sysfs_active)
5165                 return -EBUSY;
5166
5167         /*
5168          * Analyze all RAID superblock(s)
5169          */
5170         if (!mddev->raid_disks) {
5171                 if (!mddev->persistent)
5172                         return -EINVAL;
5173                 analyze_sbs(mddev);
5174         }
5175
5176         if (mddev->level != LEVEL_NONE)
5177                 request_module("md-level-%d", mddev->level);
5178         else if (mddev->clevel[0])
5179                 request_module("md-%s", mddev->clevel);
5180
5181         /*
5182          * Drop all container device buffers, from now on
5183          * the only valid external interface is through the md
5184          * device.
5185          */
5186         rdev_for_each(rdev, mddev) {
5187                 if (test_bit(Faulty, &rdev->flags))
5188                         continue;
5189                 sync_blockdev(rdev->bdev);
5190                 invalidate_bdev(rdev->bdev);
5191
5192                 /* perform some consistency tests on the device.
5193                  * We don't want the data to overlap the metadata.
5194                  * Internal bitmap issues have been handled elsewhere.
5195                  */
5196                 if (rdev->meta_bdev) {
5197                         /* Nothing to check */;
5198                 } else if (rdev->data_offset < rdev->sb_start) {
5199                         if (mddev->dev_sectors &&
5200                             rdev->data_offset + mddev->dev_sectors
5201                             > rdev->sb_start) {
5202                                 printk("md: %s: data overlaps metadata\n",
5203                                        mdname(mddev));
5204                                 return -EINVAL;
5205                         }
5206                 } else {
5207                         if (rdev->sb_start + rdev->sb_size/512
5208                             > rdev->data_offset) {
5209                                 printk("md: %s: metadata overlaps data\n",
5210                                        mdname(mddev));
5211                                 return -EINVAL;
5212                         }
5213                 }
5214                 sysfs_notify_dirent_safe(rdev->sysfs_state);
5215         }
5216
5217         if (mddev->bio_set == NULL)
5218                 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
5219
5220         spin_lock(&pers_lock);
5221         pers = find_pers(mddev->level, mddev->clevel);
5222         if (!pers || !try_module_get(pers->owner)) {
5223                 spin_unlock(&pers_lock);
5224                 if (mddev->level != LEVEL_NONE)
5225                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
5226                                mddev->level);
5227                 else
5228                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
5229                                mddev->clevel);
5230                 return -EINVAL;
5231         }
5232         spin_unlock(&pers_lock);
5233         if (mddev->level != pers->level) {
5234                 mddev->level = pers->level;
5235                 mddev->new_level = pers->level;
5236         }
5237         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5238
5239         if (mddev->reshape_position != MaxSector &&
5240             pers->start_reshape == NULL) {
5241                 /* This personality cannot handle reshaping... */
5242                 module_put(pers->owner);
5243                 return -EINVAL;
5244         }
5245
5246         if (pers->sync_request) {
5247                 /* Warn if this is a potentially silly
5248                  * configuration.
5249                  */
5250                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5251                 struct md_rdev *rdev2;
5252                 int warned = 0;
5253
5254                 rdev_for_each(rdev, mddev)
5255                         rdev_for_each(rdev2, mddev) {
5256                                 if (rdev < rdev2 &&
5257                                     rdev->bdev->bd_contains ==
5258                                     rdev2->bdev->bd_contains) {
5259                                         printk(KERN_WARNING
5260                                                "%s: WARNING: %s appears to be"
5261                                                " on the same physical disk as"
5262                                                " %s.\n",
5263                                                mdname(mddev),
5264                                                bdevname(rdev->bdev,b),
5265                                                bdevname(rdev2->bdev,b2));
5266                                         warned = 1;
5267                                 }
5268                         }
5269
5270                 if (warned)
5271                         printk(KERN_WARNING
5272                                "True protection against single-disk"
5273                                " failure might be compromised.\n");
5274         }
5275
5276         mddev->recovery = 0;
5277         /* may be overridden by the personality */
5278         mddev->resync_max_sectors = mddev->dev_sectors;
5279
5280         mddev->ok_start_degraded = start_dirty_degraded;
5281
5282         if (start_readonly && mddev->ro == 0)
5283                 mddev->ro = 2; /* read-only, but switch to read-write on first write */
5284
5285         err = pers->run(mddev);
5286         if (err)
5287                 printk(KERN_ERR "md: pers->run() failed ...\n");
5288         else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5289                 WARN_ONCE(!mddev->external_size, "%s: default size too small,"
5290                           " but 'external_size' not in effect?\n", __func__);
5291                 printk(KERN_ERR
5292                        "md: invalid array_size %llu > default size %llu\n",
5293                        (unsigned long long)mddev->array_sectors / 2,
5294                        (unsigned long long)pers->size(mddev, 0, 0) / 2);
5295                 err = -EINVAL;
5296         }
5297         if (err == 0 && pers->sync_request &&
5298             (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5299                 struct bitmap *bitmap;
5300
5301                 bitmap = bitmap_create(mddev, -1);
5302                 if (IS_ERR(bitmap)) {
5303                         err = PTR_ERR(bitmap);
5304                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
5305                                mdname(mddev), err);
5306                 } else
5307                         mddev->bitmap = bitmap;
5308
5309         }
5310         if (err) {
5311                 mddev_detach(mddev);
5312                 if (mddev->private)
5313                         pers->free(mddev, mddev->private);
5314                 mddev->private = NULL;
5315                 module_put(pers->owner);
5316                 bitmap_destroy(mddev);
5317                 return err;
5318         }
5319         if (mddev->queue) {
5320                 bool nonrot = true;
5321
5322                 rdev_for_each(rdev, mddev) {
5323                         if (rdev->raid_disk >= 0 &&
5324                             !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
5325                                 nonrot = false;
5326                                 break;
5327                         }
5328                 }
5329                 if (mddev->degraded)
5330                         nonrot = false;
5331                 if (nonrot)
5332                         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5333                 else
5334                         queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5335                 mddev->queue->backing_dev_info.congested_data = mddev;
5336                 mddev->queue->backing_dev_info.congested_fn = md_congested;
5337         }
5338         if (pers->sync_request) {
5339                 if (mddev->kobj.sd &&
5340                     sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5341                         printk(KERN_WARNING
5342                                "md: cannot register extra attributes for %s\n",
5343                                mdname(mddev));
5344                 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
5345         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
5346                 mddev->ro = 0;
5347
5348         atomic_set(&mddev->writes_pending,0);
5349         atomic_set(&mddev->max_corr_read_errors,
5350                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
5351         mddev->safemode = 0;
5352         if (mddev_is_clustered(mddev))
5353                 mddev->safemode_delay = 0;
5354         else
5355                 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
5356         mddev->in_sync = 1;
5357         smp_wmb();
5358         spin_lock(&mddev->lock);
5359         mddev->pers = pers;
5360         spin_unlock(&mddev->lock);
5361         rdev_for_each(rdev, mddev)
5362                 if (rdev->raid_disk >= 0)
5363                         if (sysfs_link_rdev(mddev, rdev))
5364                                 /* failure here is OK */;
5365
5366         if (mddev->degraded && !mddev->ro)
5367                 /* This ensures that recovering status is reported immediately
5368                  * via sysfs - until a lack of spares is confirmed.
5369                  */
5370                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5371         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5372
5373         if (mddev->flags & MD_UPDATE_SB_FLAGS)
5374                 md_update_sb(mddev, 0);
5375
5376         md_new_event(mddev);
5377         sysfs_notify_dirent_safe(mddev->sysfs_state);
5378         sysfs_notify_dirent_safe(mddev->sysfs_action);
5379         sysfs_notify(&mddev->kobj, NULL, "degraded");
5380         return 0;
5381 }
5382 EXPORT_SYMBOL_GPL(md_run);
5383
5384 static int do_md_run(struct mddev *mddev)
5385 {
5386         int err;
5387
5388         err = md_run(mddev);
5389         if (err)
5390                 goto out;
5391         err = bitmap_load(mddev);
5392         if (err) {
5393                 bitmap_destroy(mddev);
5394                 goto out;
5395         }
5396
5397         if (mddev_is_clustered(mddev))
5398                 md_allow_write(mddev);
5399
5400         md_wakeup_thread(mddev->thread);
5401         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
5402
5403         set_capacity(mddev->gendisk, mddev->array_sectors);
5404         revalidate_disk(mddev->gendisk);
5405         mddev->changed = 1;
5406         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5407 out:
5408         return err;
5409 }
5410
5411 static int restart_array(struct mddev *mddev)
5412 {
5413         struct gendisk *disk = mddev->gendisk;
5414
5415         /* Complain if it has no devices */
5416         if (list_empty(&mddev->disks))
5417                 return -ENXIO;
5418         if (!mddev->pers)
5419                 return -EINVAL;
5420         if (!mddev->ro)
5421                 return -EBUSY;
5422         if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5423                 struct md_rdev *rdev;
5424                 bool has_journal = false;
5425
5426                 rcu_read_lock();
5427                 rdev_for_each_rcu(rdev, mddev) {
5428                         if (test_bit(Journal, &rdev->flags) &&
5429                             !test_bit(Faulty, &rdev->flags)) {
5430                                 has_journal = true;
5431                                 break;
5432                         }
5433                 }
5434                 rcu_read_unlock();
5435
5436                 /* Don't restart rw with journal missing/faulty */
5437                 if (!has_journal)
5438                         return -EINVAL;
5439         }
5440
5441         mddev->safemode = 0;
5442         mddev->ro = 0;
5443         set_disk_ro(disk, 0);
5444         printk(KERN_INFO "md: %s switched to read-write mode.\n",
5445                 mdname(mddev));
5446         /* Kick recovery or resync if necessary */
5447         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5448         md_wakeup_thread(mddev->thread);
5449         md_wakeup_thread(mddev->sync_thread);
5450         sysfs_notify_dirent_safe(mddev->sysfs_state);
5451         return 0;
5452 }
5453
5454 static void md_clean(struct mddev *mddev)
5455 {
5456         mddev->array_sectors = 0;
5457         mddev->external_size = 0;
5458         mddev->dev_sectors = 0;
5459         mddev->raid_disks = 0;
5460         mddev->recovery_cp = 0;
5461         mddev->resync_min = 0;
5462         mddev->resync_max = MaxSector;
5463         mddev->reshape_position = MaxSector;
5464         mddev->external = 0;
5465         mddev->persistent = 0;
5466         mddev->level = LEVEL_NONE;
5467         mddev->clevel[0] = 0;
5468         mddev->flags = 0;
5469         mddev->ro = 0;
5470         mddev->metadata_type[0] = 0;
5471         mddev->chunk_sectors = 0;
5472         mddev->ctime = mddev->utime = 0;
5473         mddev->layout = 0;
5474         mddev->max_disks = 0;
5475         mddev->events = 0;
5476         mddev->can_decrease_events = 0;
5477         mddev->delta_disks = 0;
5478         mddev->reshape_backwards = 0;
5479         mddev->new_level = LEVEL_NONE;
5480         mddev->new_layout = 0;
5481         mddev->new_chunk_sectors = 0;
5482         mddev->curr_resync = 0;
5483         atomic64_set(&mddev->resync_mismatches, 0);
5484         mddev->suspend_lo = mddev->suspend_hi = 0;
5485         mddev->sync_speed_min = mddev->sync_speed_max = 0;
5486         mddev->recovery = 0;
5487         mddev->in_sync = 0;
5488         mddev->changed = 0;
5489         mddev->degraded = 0;
5490         mddev->safemode = 0;
5491         mddev->private = NULL;
5492         mddev->cluster_info = NULL;
5493         mddev->bitmap_info.offset = 0;
5494         mddev->bitmap_info.default_offset = 0;
5495         mddev->bitmap_info.default_space = 0;
5496         mddev->bitmap_info.chunksize = 0;
5497         mddev->bitmap_info.daemon_sleep = 0;
5498         mddev->bitmap_info.max_write_behind = 0;
5499         mddev->bitmap_info.nodes = 0;
5500 }
5501
5502 static void __md_stop_writes(struct mddev *mddev)
5503 {
5504         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5505         flush_workqueue(md_misc_wq);
5506         if (mddev->sync_thread) {
5507                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5508                 md_reap_sync_thread(mddev);
5509         }
5510
5511         del_timer_sync(&mddev->safemode_timer);
5512
5513         bitmap_flush(mddev);
5514         md_super_wait(mddev);
5515
5516         if (mddev->ro == 0 &&
5517             ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
5518              (mddev->flags & MD_UPDATE_SB_FLAGS))) {
5519                 /* mark array as shutdown cleanly */
5520                 if (!mddev_is_clustered(mddev))
5521                         mddev->in_sync = 1;
5522                 md_update_sb(mddev, 1);
5523         }
5524 }
5525
5526 void md_stop_writes(struct mddev *mddev)
5527 {
5528         mddev_lock_nointr(mddev);
5529         __md_stop_writes(mddev);
5530         mddev_unlock(mddev);
5531 }
5532 EXPORT_SYMBOL_GPL(md_stop_writes);
5533
5534 static void mddev_detach(struct mddev *mddev)
5535 {
5536         struct bitmap *bitmap = mddev->bitmap;
5537         /* wait for behind writes to complete */
5538         if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
5539                 printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n",
5540                        mdname(mddev));
5541                 /* need to kick something here to make sure I/O goes? */
5542                 wait_event(bitmap->behind_wait,
5543                            atomic_read(&bitmap->behind_writes) == 0);
5544         }
5545         if (mddev->pers && mddev->pers->quiesce) {
5546                 mddev->pers->quiesce(mddev, 1);
5547                 mddev->pers->quiesce(mddev, 0);
5548         }
5549         md_unregister_thread(&mddev->thread);
5550         if (mddev->queue)
5551                 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
5552 }
5553
5554 static void __md_stop(struct mddev *mddev)
5555 {
5556         struct md_personality *pers = mddev->pers;
5557         mddev_detach(mddev);
5558         /* Ensure ->event_work is done */
5559         flush_workqueue(md_misc_wq);
5560         spin_lock(&mddev->lock);
5561         mddev->pers = NULL;
5562         spin_unlock(&mddev->lock);
5563         pers->free(mddev, mddev->private);
5564         mddev->private = NULL;
5565         if (pers->sync_request && mddev->to_remove == NULL)
5566                 mddev->to_remove = &md_redundancy_group;
5567         module_put(pers->owner);
5568         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5569 }
5570
5571 void md_stop(struct mddev *mddev)
5572 {
5573         /* stop the array and free any attached data structures.
5574          * This is called from dm-raid.
5575          */
5576         __md_stop(mddev);
5577         bitmap_destroy(mddev);
5578         if (mddev->bio_set)
5579                 bioset_free(mddev->bio_set);
5580 }
5581
5582 EXPORT_SYMBOL_GPL(md_stop);
5583
5584 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5585 {
5586         int err = 0;
5587         int did_freeze = 0;
5588
5589         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5590                 did_freeze = 1;
5591                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5592                 md_wakeup_thread(mddev->thread);
5593         }
5594         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5595                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5596         if (mddev->sync_thread)
5597                 /* Thread might be blocked waiting for metadata update
5598                  * which will now never happen */
5599                 wake_up_process(mddev->sync_thread->tsk);
5600
5601         if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
5602                 return -EBUSY;
5603         mddev_unlock(mddev);
5604         wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
5605                                           &mddev->recovery));
5606         wait_event(mddev->sb_wait,
5607                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
5608         mddev_lock_nointr(mddev);
5609
5610         mutex_lock(&mddev->open_mutex);
5611         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5612             mddev->sync_thread ||
5613             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5614                 printk("md: %s still in use.\n",mdname(mddev));
5615                 if (did_freeze) {
5616                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5617                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5618                         md_wakeup_thread(mddev->thread);
5619                 }
5620                 err = -EBUSY;
5621                 goto out;
5622         }
5623         if (mddev->pers) {
5624                 __md_stop_writes(mddev);
5625
5626                 err  = -ENXIO;
5627                 if (mddev->ro==1)
5628                         goto out;
5629                 mddev->ro = 1;
5630                 set_disk_ro(mddev->gendisk, 1);
5631                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5632                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5633                 md_wakeup_thread(mddev->thread);
5634                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5635                 err = 0;
5636         }
5637 out:
5638         mutex_unlock(&mddev->open_mutex);
5639         return err;
5640 }
5641
5642 /* mode:
5643  *   0 - completely stop and dis-assemble array
5644  *   2 - stop but do not disassemble array
5645  */
5646 static int do_md_stop(struct mddev *mddev, int mode,
5647                       struct block_device *bdev)
5648 {
5649         struct gendisk *disk = mddev->gendisk;
5650         struct md_rdev *rdev;
5651         int did_freeze = 0;
5652
5653         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5654                 did_freeze = 1;
5655                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5656                 md_wakeup_thread(mddev->thread);
5657         }
5658         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5659                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5660         if (mddev->sync_thread)
5661                 /* Thread might be blocked waiting for metadata update
5662                  * which will now never happen */
5663                 wake_up_process(mddev->sync_thread->tsk);
5664
5665         mddev_unlock(mddev);
5666         wait_event(resync_wait, (mddev->sync_thread == NULL &&
5667                                  !test_bit(MD_RECOVERY_RUNNING,
5668                                            &mddev->recovery)));
5669         mddev_lock_nointr(mddev);
5670
5671         mutex_lock(&mddev->open_mutex);
5672         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5673             mddev->sysfs_active ||
5674             mddev->sync_thread ||
5675             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5676                 printk("md: %s still in use.\n", mdname(mddev));
5677                 mutex_unlock(&mddev->open_mutex);
5678                 if (did_freeze) {
5679                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5680                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5681                         md_wakeup_thread(mddev->thread);
5682                 }
5683                 return -EBUSY;
5684         }
5685         if (mddev->pers) {
5686                 if (mddev->ro)
5687                         set_disk_ro(disk, 0);
5688
5689                 __md_stop_writes(mddev);
5690                 __md_stop(mddev);
5691                 mddev->queue->backing_dev_info.congested_fn = NULL;
5692
5693                 /* tell userspace to handle 'inactive' */
5694                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5695
5696                 rdev_for_each(rdev, mddev)
5697                         if (rdev->raid_disk >= 0)
5698                                 sysfs_unlink_rdev(mddev, rdev);
5699
5700                 set_capacity(disk, 0);
5701                 mutex_unlock(&mddev->open_mutex);
5702                 mddev->changed = 1;
5703                 revalidate_disk(disk);
5704
5705                 if (mddev->ro)
5706                         mddev->ro = 0;
5707         } else
5708                 mutex_unlock(&mddev->open_mutex);
5709         /*
5710          * Free resources if final stop
5711          */
5712         if (mode == 0) {
5713                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
5714
5715                 bitmap_destroy(mddev);
5716                 if (mddev->bitmap_info.file) {
5717                         struct file *f = mddev->bitmap_info.file;
5718                         spin_lock(&mddev->lock);
5719                         mddev->bitmap_info.file = NULL;
5720                         spin_unlock(&mddev->lock);
5721                         fput(f);
5722                 }
5723                 mddev->bitmap_info.offset = 0;
5724
5725                 export_array(mddev);
5726
5727                 md_clean(mddev);
5728                 if (mddev->hold_active == UNTIL_STOP)
5729                         mddev->hold_active = 0;
5730         }
5731         md_new_event(mddev);
5732         sysfs_notify_dirent_safe(mddev->sysfs_state);
5733         return 0;
5734 }
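
/*
 * Illustrative user-space sketch (not part of the driver): the mode-0
 * "stop and disassemble" path above is normally reached through the
 * STOP_ARRAY ioctl handled in md_ioctl() further down, while
 * STOP_ARRAY_RO takes the md_set_readonly() path.  The device node
 * below is only an example.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	int fd = open("/dev/md0", O_RDONLY);
 *
 *	if (fd >= 0)
 *		ioctl(fd, STOP_ARRAY, NULL);	// reaches do_md_stop(mddev, 0, bdev)
 */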
5735
5736 #ifndef MODULE
5737 static void autorun_array(struct mddev *mddev)
5738 {
5739         struct md_rdev *rdev;
5740         int err;
5741
5742         if (list_empty(&mddev->disks))
5743                 return;
5744
5745         printk(KERN_INFO "md: running: ");
5746
5747         rdev_for_each(rdev, mddev) {
5748                 char b[BDEVNAME_SIZE];
5749                 printk("<%s>", bdevname(rdev->bdev,b));
5750         }
5751         printk("\n");
5752
5753         err = do_md_run(mddev);
5754         if (err) {
5755                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
5756                 do_md_stop(mddev, 0, NULL);
5757         }
5758 }
5759
5760 /*
5761  * Let's try to run arrays based on all disks that have arrived
5762  * until now (those are in pending_raid_disks).
5763  *
5764  * The method: pick the first pending disk, collect all disks with
5765  * the same UUID, remove all from the pending list and put them into
5766  * the 'same_array' list. Then order this list based on superblock
5767  * update time (freshest comes first), kick out 'old' disks and
5768  * compare superblocks. If everything's fine then run it.
5769  *
5770  * If "unit" is allocated, then bump its reference count
5771  */
5772 static void autorun_devices(int part)
5773 {
5774         struct md_rdev *rdev0, *rdev, *tmp;
5775         struct mddev *mddev;
5776         char b[BDEVNAME_SIZE];
5777
5778         printk(KERN_INFO "md: autorun ...\n");
5779         while (!list_empty(&pending_raid_disks)) {
5780                 int unit;
5781                 dev_t dev;
5782                 LIST_HEAD(candidates);
5783                 rdev0 = list_entry(pending_raid_disks.next,
5784                                          struct md_rdev, same_set);
5785
5786                 printk(KERN_INFO "md: considering %s ...\n",
5787                         bdevname(rdev0->bdev,b));
5788                 INIT_LIST_HEAD(&candidates);
5789                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
5790                         if (super_90_load(rdev, rdev0, 0) >= 0) {
5791                                 printk(KERN_INFO "md:  adding %s ...\n",
5792                                         bdevname(rdev->bdev,b));
5793                                 list_move(&rdev->same_set, &candidates);
5794                         }
5795                 /*
5796                  * now we have a set of devices, with all of them having
5797                  * mostly sane superblocks. It's time to allocate the
5798                  * mddev.
5799                  */
5800                 if (part) {
5801                         dev = MKDEV(mdp_major,
5802                                     rdev0->preferred_minor << MdpMinorShift);
5803                         unit = MINOR(dev) >> MdpMinorShift;
5804                 } else {
5805                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
5806                         unit = MINOR(dev);
5807                 }
5808                 if (rdev0->preferred_minor != unit) {
5809                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
5810                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
5811                         break;
5812                 }
5813
5814                 md_probe(dev, NULL, NULL);
5815                 mddev = mddev_find(dev);
5816                 if (!mddev || !mddev->gendisk) {
5817                         if (mddev)
5818                                 mddev_put(mddev);
5819                         printk(KERN_ERR
5820                                 "md: cannot allocate memory for md drive.\n");
5821                         break;
5822                 }
5823                 if (mddev_lock(mddev))
5824                         printk(KERN_WARNING "md: %s locked, cannot run\n",
5825                                mdname(mddev));
5826                 else if (mddev->raid_disks || mddev->major_version
5827                          || !list_empty(&mddev->disks)) {
5828                         printk(KERN_WARNING
5829                                 "md: %s already running, cannot run %s\n",
5830                                 mdname(mddev), bdevname(rdev0->bdev,b));
5831                         mddev_unlock(mddev);
5832                 } else {
5833                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
5834                         mddev->persistent = 1;
5835                         rdev_for_each_list(rdev, tmp, &candidates) {
5836                                 list_del_init(&rdev->same_set);
5837                                 if (bind_rdev_to_array(rdev, mddev))
5838                                         export_rdev(rdev);
5839                         }
5840                         autorun_array(mddev);
5841                         mddev_unlock(mddev);
5842                 }
5843                 /* on success, candidates will be empty; on error
5844                  * they won't...
5845                  */
5846                 rdev_for_each_list(rdev, tmp, &candidates) {
5847                         list_del_init(&rdev->same_set);
5848                         export_rdev(rdev);
5849                 }
5850                 mddev_put(mddev);
5851         }
5852         printk(KERN_INFO "md: ... autorun DONE.\n");
5853 }
5854 #endif /* !MODULE */
5855
5856 static int get_version(void __user *arg)
5857 {
5858         mdu_version_t ver;
5859
5860         ver.major = MD_MAJOR_VERSION;
5861         ver.minor = MD_MINOR_VERSION;
5862         ver.patchlevel = MD_PATCHLEVEL_VERSION;
5863
5864         if (copy_to_user(arg, &ver, sizeof(ver)))
5865                 return -EFAULT;
5866
5867         return 0;
5868 }
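
/*
 * Illustrative user-space sketch of the RAID_VERSION ioctl serviced by
 * get_version() above; the device path is an assumption.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	mdu_version_t ver;
 *	int fd = open("/dev/md0", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, RAID_VERSION, &ver) == 0)
 *		printf("md driver %d.%d.%d\n",
 *		       ver.major, ver.minor, ver.patchlevel);
 */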
5869
5870 static int get_array_info(struct mddev *mddev, void __user *arg)
5871 {
5872         mdu_array_info_t info;
5873         int nr,working,insync,failed,spare;
5874         struct md_rdev *rdev;
5875
5876         nr = working = insync = failed = spare = 0;
5877         rcu_read_lock();
5878         rdev_for_each_rcu(rdev, mddev) {
5879                 nr++;
5880                 if (test_bit(Faulty, &rdev->flags))
5881                         failed++;
5882                 else {
5883                         working++;
5884                         if (test_bit(In_sync, &rdev->flags))
5885                                 insync++;
5886                         else if (test_bit(Journal, &rdev->flags))
5887                                 /* TODO: add journal count to md_u.h */
5888                                 ;
5889                         else
5890                                 spare++;
5891                 }
5892         }
5893         rcu_read_unlock();
5894
5895         info.major_version = mddev->major_version;
5896         info.minor_version = mddev->minor_version;
5897         info.patch_version = MD_PATCHLEVEL_VERSION;
5898         info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
5899         info.level         = mddev->level;
5900         info.size          = mddev->dev_sectors / 2;
5901         if (info.size != mddev->dev_sectors / 2) /* overflow */
5902                 info.size = -1;
5903         info.nr_disks      = nr;
5904         info.raid_disks    = mddev->raid_disks;
5905         info.md_minor      = mddev->md_minor;
5906         info.not_persistent= !mddev->persistent;
5907
5908         info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
5909         info.state         = 0;
5910         if (mddev->in_sync)
5911                 info.state = (1<<MD_SB_CLEAN);
5912         if (mddev->bitmap && mddev->bitmap_info.offset)
5913                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
5914         if (mddev_is_clustered(mddev))
5915                 info.state |= (1<<MD_SB_CLUSTERED);
5916         info.active_disks  = insync;
5917         info.working_disks = working;
5918         info.failed_disks  = failed;
5919         info.spare_disks   = spare;
5920
5921         info.layout        = mddev->layout;
5922         info.chunk_size    = mddev->chunk_sectors << 9;
5923
5924         if (copy_to_user(arg, &info, sizeof(info)))
5925                 return -EFAULT;
5926
5927         return 0;
5928 }
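
/*
 * Illustrative user-space sketch (fd is an already-open md device node,
 * as in the RAID_VERSION example above): GET_ARRAY_INFO fills an
 * mdu_array_info_t with the counters computed in get_array_info().
 *
 *	mdu_array_info_t info;
 *
 *	if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *		printf("level %d, %d raid disks, %d active, %d failed\n",
 *		       info.level, info.raid_disks,
 *		       info.active_disks, info.failed_disks);
 */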
5929
5930 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
5931 {
5932         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
5933         char *ptr;
5934         int err;
5935
5936         file = kzalloc(sizeof(*file), GFP_NOIO);
5937         if (!file)
5938                 return -ENOMEM;
5939
5940         err = 0;
5941         spin_lock(&mddev->lock);
5942         /* bitmap enabled */
5943         if (mddev->bitmap_info.file) {
5944                 ptr = file_path(mddev->bitmap_info.file, file->pathname,
5945                                 sizeof(file->pathname));
5946                 if (IS_ERR(ptr))
5947                         err = PTR_ERR(ptr);
5948                 else
5949                         memmove(file->pathname, ptr,
5950                                 sizeof(file->pathname)-(ptr-file->pathname));
5951         }
5952         spin_unlock(&mddev->lock);
5953
5954         if (err == 0 &&
5955             copy_to_user(arg, file, sizeof(*file)))
5956                 err = -EFAULT;
5957
5958         kfree(file);
5959         return err;
5960 }
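
/*
 * Illustrative user-space sketch: GET_BITMAP_FILE returns the path of a
 * file-backed bitmap; the pathname stays empty when the bitmap is
 * internal or absent.
 *
 *	mdu_bitmap_file_t bmf;
 *
 *	if (ioctl(fd, GET_BITMAP_FILE, &bmf) == 0 && bmf.pathname[0])
 *		printf("bitmap file: %s\n", bmf.pathname);
 */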
5961
5962 static int get_disk_info(struct mddev *mddev, void __user * arg)
5963 {
5964         mdu_disk_info_t info;
5965         struct md_rdev *rdev;
5966
5967         if (copy_from_user(&info, arg, sizeof(info)))
5968                 return -EFAULT;
5969
5970         rcu_read_lock();
5971         rdev = md_find_rdev_nr_rcu(mddev, info.number);
5972         if (rdev) {
5973                 info.major = MAJOR(rdev->bdev->bd_dev);
5974                 info.minor = MINOR(rdev->bdev->bd_dev);
5975                 info.raid_disk = rdev->raid_disk;
5976                 info.state = 0;
5977                 if (test_bit(Faulty, &rdev->flags))
5978                         info.state |= (1<<MD_DISK_FAULTY);
5979                 else if (test_bit(In_sync, &rdev->flags)) {
5980                         info.state |= (1<<MD_DISK_ACTIVE);
5981                         info.state |= (1<<MD_DISK_SYNC);
5982                 }
5983                 if (test_bit(Journal, &rdev->flags))
5984                         info.state |= (1<<MD_DISK_JOURNAL);
5985                 if (test_bit(WriteMostly, &rdev->flags))
5986                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
5987         } else {
5988                 info.major = info.minor = 0;
5989                 info.raid_disk = -1;
5990                 info.state = (1<<MD_DISK_REMOVED);
5991         }
5992         rcu_read_unlock();
5993
5994         if (copy_to_user(arg, &info, sizeof(info)))
5995                 return -EFAULT;
5996
5997         return 0;
5998 }
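
/*
 * Illustrative user-space sketch: GET_DISK_INFO copies the argument both
 * ways, so the caller fills in info.number and the kernel fills in the
 * rest.
 *
 *	mdu_disk_info_t dinfo = { .number = 0 };
 *
 *	if (ioctl(fd, GET_DISK_INFO, &dinfo) == 0)
 *		printf("slot %d: dev %d:%d state %#x\n",
 *		       dinfo.raid_disk, dinfo.major, dinfo.minor, dinfo.state);
 */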
5999
6000 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
6001 {
6002         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
6003         struct md_rdev *rdev;
6004         dev_t dev = MKDEV(info->major,info->minor);
6005
6006         if (mddev_is_clustered(mddev) &&
6007                 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
6008                 pr_err("%s: Cannot add to clustered mddev.\n",
6009                                mdname(mddev));
6010                 return -EINVAL;
6011         }
6012
6013         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6014                 return -EOVERFLOW;
6015
6016         if (!mddev->raid_disks) {
6017                 int err;
6018                 /* expecting a device which has a superblock */
6019                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6020                 if (IS_ERR(rdev)) {
6021                         printk(KERN_WARNING
6022                                 "md: md_import_device returned %ld\n",
6023                                 PTR_ERR(rdev));
6024                         return PTR_ERR(rdev);
6025                 }
6026                 if (!list_empty(&mddev->disks)) {
6027                         struct md_rdev *rdev0
6028                                 = list_entry(mddev->disks.next,
6029                                              struct md_rdev, same_set);
6030                         err = super_types[mddev->major_version]
6031                                 .load_super(rdev, rdev0, mddev->minor_version);
6032                         if (err < 0) {
6033                                 printk(KERN_WARNING
6034                                         "md: %s has different UUID to %s\n",
6035                                         bdevname(rdev->bdev,b),
6036                                         bdevname(rdev0->bdev,b2));
6037                                 export_rdev(rdev);
6038                                 return -EINVAL;
6039                         }
6040                 }
6041                 err = bind_rdev_to_array(rdev, mddev);
6042                 if (err)
6043                         export_rdev(rdev);
6044                 return err;
6045         }
6046
6047         /*
6048          * add_new_disk can be used once the array is assembled
6049          * to add "hot spares".  They must already have a superblock
6050          * written
6051          */
6052         if (mddev->pers) {
6053                 int err;
6054                 if (!mddev->pers->hot_add_disk) {
6055                         printk(KERN_WARNING
6056                                 "%s: personality does not support diskops!\n",
6057                                mdname(mddev));
6058                         return -EINVAL;
6059                 }
6060                 if (mddev->persistent)
6061                         rdev = md_import_device(dev, mddev->major_version,
6062                                                 mddev->minor_version);
6063                 else
6064                         rdev = md_import_device(dev, -1, -1);
6065                 if (IS_ERR(rdev)) {
6066                         printk(KERN_WARNING
6067                                 "md: md_import_device returned %ld\n",
6068                                 PTR_ERR(rdev));
6069                         return PTR_ERR(rdev);
6070                 }
6071                 /* set saved_raid_disk if appropriate */
6072                 if (!mddev->persistent) {
6073                         if (info->state & (1<<MD_DISK_SYNC)  &&
6074                             info->raid_disk < mddev->raid_disks) {
6075                                 rdev->raid_disk = info->raid_disk;
6076                                 set_bit(In_sync, &rdev->flags);
6077                                 clear_bit(Bitmap_sync, &rdev->flags);
6078                         } else
6079                                 rdev->raid_disk = -1;
6080                         rdev->saved_raid_disk = rdev->raid_disk;
6081                 } else
6082                         super_types[mddev->major_version].
6083                                 validate_super(mddev, rdev);
6084                 if ((info->state & (1<<MD_DISK_SYNC)) &&
6085                      rdev->raid_disk != info->raid_disk) {
6086                         /* This was a hot-add request, but the events don't
6087                          * match, so reject it.
6088                          */
6089                         export_rdev(rdev);
6090                         return -EINVAL;
6091                 }
6092
6093                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6094                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6095                         set_bit(WriteMostly, &rdev->flags);
6096                 else
6097                         clear_bit(WriteMostly, &rdev->flags);
6098
6099                 if (info->state & (1<<MD_DISK_JOURNAL)) {
6100                         struct md_rdev *rdev2;
6101                         bool has_journal = false;
6102
6103                         /* make sure no existing journal disk */
6104                         rdev_for_each(rdev2, mddev) {
6105                                 if (test_bit(Journal, &rdev2->flags)) {
6106                                         has_journal = true;
6107                                         break;
6108                                 }
6109                         }
6110                         if (has_journal) {
6111                                 export_rdev(rdev);
6112                                 return -EBUSY;
6113                         }
6114                         set_bit(Journal, &rdev->flags);
6115                 }
6116                 /*
6117                  * check whether the device shows up in other nodes
6118                  */
6119                 if (mddev_is_clustered(mddev)) {
6120                         if (info->state & (1 << MD_DISK_CANDIDATE))
6121                                 set_bit(Candidate, &rdev->flags);
6122                         else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6123                                 /* --add initiated by this node */
6124                                 err = md_cluster_ops->add_new_disk(mddev, rdev);
6125                                 if (err) {
6126                                         export_rdev(rdev);
6127                                         return err;
6128                                 }
6129                         }
6130                 }
6131
6132                 rdev->raid_disk = -1;
6133                 err = bind_rdev_to_array(rdev, mddev);
6134
6135                 if (err)
6136                         export_rdev(rdev);
6137
6138                 if (mddev_is_clustered(mddev)) {
6139                         if (info->state & (1 << MD_DISK_CANDIDATE)) {
6140                                 if (!err) {
6141                                         err = md_cluster_ops->new_disk_ack(mddev,
6142                                                 err == 0);
6143                                         if (err)
6144                                                 md_kick_rdev_from_array(rdev);
6145                                 }
6146                         } else {
6147                                 if (err)
6148                                         md_cluster_ops->add_new_disk_cancel(mddev);
6149                                 else
6150                                         err = add_bound_rdev(rdev);
6151                         }
6152
6153                 } else if (!err)
6154                         err = add_bound_rdev(rdev);
6155
6156                 return err;
6157         }
6158
6159         /* otherwise, add_new_disk is only allowed
6160          * for major_version==0 superblocks
6161          */
6162         if (mddev->major_version != 0) {
6163                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
6164                        mdname(mddev));
6165                 return -EINVAL;
6166         }
6167
6168         if (!(info->state & (1<<MD_DISK_FAULTY))) {
6169                 int err;
6170                 rdev = md_import_device(dev, -1, 0);
6171                 if (IS_ERR(rdev)) {
6172                         printk(KERN_WARNING
6173                                 "md: error, md_import_device() returned %ld\n",
6174                                 PTR_ERR(rdev));
6175                         return PTR_ERR(rdev);
6176                 }
6177                 rdev->desc_nr = info->number;
6178                 if (info->raid_disk < mddev->raid_disks)
6179                         rdev->raid_disk = info->raid_disk;
6180                 else
6181                         rdev->raid_disk = -1;
6182
6183                 if (rdev->raid_disk < mddev->raid_disks)
6184                         if (info->state & (1<<MD_DISK_SYNC))
6185                                 set_bit(In_sync, &rdev->flags);
6186
6187                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6188                         set_bit(WriteMostly, &rdev->flags);
6189
6190                 if (!mddev->persistent) {
6191                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
6192                         rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6193                 } else
6194                         rdev->sb_start = calc_dev_sboffset(rdev);
6195                 rdev->sectors = rdev->sb_start;
6196
6197                 err = bind_rdev_to_array(rdev, mddev);
6198                 if (err) {
6199                         export_rdev(rdev);
6200                         return err;
6201                 }
6202         }
6203
6204         return 0;
6205 }
6206
6207 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6208 {
6209         char b[BDEVNAME_SIZE];
6210         struct md_rdev *rdev;
6211
6212         if (!mddev->pers)
6213                 return -ENODEV;
6214
6215         rdev = find_rdev(mddev, dev);
6216         if (!rdev)
6217                 return -ENXIO;
6218
6219         if (rdev->raid_disk < 0)
6220                 goto kick_rdev;
6221
6222         clear_bit(Blocked, &rdev->flags);
6223         remove_and_add_spares(mddev, rdev);
6224
6225         if (rdev->raid_disk >= 0)
6226                 goto busy;
6227
6228 kick_rdev:
6229         if (mddev_is_clustered(mddev))
6230                 md_cluster_ops->remove_disk(mddev, rdev);
6231
6232         md_kick_rdev_from_array(rdev);
6233         md_update_sb(mddev, 1);
6234         md_new_event(mddev);
6235
6236         return 0;
6237 busy:
6238         printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
6239                 bdevname(rdev->bdev,b), mdname(mddev));
6240         return -EBUSY;
6241 }
6242
6243 static int hot_add_disk(struct mddev *mddev, dev_t dev)
6244 {
6245         char b[BDEVNAME_SIZE];
6246         int err;
6247         struct md_rdev *rdev;
6248
6249         if (!mddev->pers)
6250                 return -ENODEV;
6251
6252         if (mddev->major_version != 0) {
6253                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
6254                         " version-0 superblocks.\n",
6255                         mdname(mddev));
6256                 return -EINVAL;
6257         }
6258         if (!mddev->pers->hot_add_disk) {
6259                 printk(KERN_WARNING
6260                         "%s: personality does not support diskops!\n",
6261                         mdname(mddev));
6262                 return -EINVAL;
6263         }
6264
6265         rdev = md_import_device(dev, -1, 0);
6266         if (IS_ERR(rdev)) {
6267                 printk(KERN_WARNING
6268                         "md: error, md_import_device() returned %ld\n",
6269                         PTR_ERR(rdev));
6270                 return -EINVAL;
6271         }
6272
6273         if (mddev->persistent)
6274                 rdev->sb_start = calc_dev_sboffset(rdev);
6275         else
6276                 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6277
6278         rdev->sectors = rdev->sb_start;
6279
6280         if (test_bit(Faulty, &rdev->flags)) {
6281                 printk(KERN_WARNING
6282                         "md: can not hot-add faulty %s disk to %s!\n",
6283                         bdevname(rdev->bdev,b), mdname(mddev));
6284                 err = -EINVAL;
6285                 goto abort_export;
6286         }
6287
6288         clear_bit(In_sync, &rdev->flags);
6289         rdev->desc_nr = -1;
6290         rdev->saved_raid_disk = -1;
6291         err = bind_rdev_to_array(rdev, mddev);
6292         if (err)
6293                 goto abort_export;
6294
6295         /*
6296          * The rest should better be atomic, we can have disk failures
6297          * noticed in interrupt contexts ...
6298          */
6299
6300         rdev->raid_disk = -1;
6301
6302         md_update_sb(mddev, 1);
6303         /*
6304          * Kick recovery, maybe this spare has to be added to the
6305          * array immediately.
6306          */
6307         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6308         md_wakeup_thread(mddev->thread);
6309         md_new_event(mddev);
6310         return 0;
6311
6312 abort_export:
6313         export_rdev(rdev);
6314         return err;
6315 }
6316
6317 static int set_bitmap_file(struct mddev *mddev, int fd)
6318 {
6319         int err = 0;
6320
6321         if (mddev->pers) {
6322                 if (!mddev->pers->quiesce || !mddev->thread)
6323                         return -EBUSY;
6324                 if (mddev->recovery || mddev->sync_thread)
6325                         return -EBUSY;
6326                 /* we should be able to change the bitmap.. */
6327         }
6328
6329         if (fd >= 0) {
6330                 struct inode *inode;
6331                 struct file *f;
6332
6333                 if (mddev->bitmap || mddev->bitmap_info.file)
6334                         return -EEXIST; /* cannot add when bitmap is present */
6335                 f = fget(fd);
6336
6337                 if (f == NULL) {
6338                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
6339                                mdname(mddev));
6340                         return -EBADF;
6341                 }
6342
6343                 inode = f->f_mapping->host;
6344                 if (!S_ISREG(inode->i_mode)) {
6345                         printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
6346                                mdname(mddev));
6347                         err = -EBADF;
6348                 } else if (!(f->f_mode & FMODE_WRITE)) {
6349                         printk(KERN_ERR "%s: error: bitmap file must be open for write\n",
6350                                mdname(mddev));
6351                         err = -EBADF;
6352                 } else if (atomic_read(&inode->i_writecount) != 1) {
6353                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
6354                                mdname(mddev));
6355                         err = -EBUSY;
6356                 }
6357                 if (err) {
6358                         fput(f);
6359                         return err;
6360                 }
6361                 mddev->bitmap_info.file = f;
6362                 mddev->bitmap_info.offset = 0; /* file overrides offset */
6363         } else if (mddev->bitmap == NULL)
6364                 return -ENOENT; /* cannot remove what isn't there */
6365         err = 0;
6366         if (mddev->pers) {
6367                 mddev->pers->quiesce(mddev, 1);
6368                 if (fd >= 0) {
6369                         struct bitmap *bitmap;
6370
6371                         bitmap = bitmap_create(mddev, -1);
6372                         if (!IS_ERR(bitmap)) {
6373                                 mddev->bitmap = bitmap;
6374                                 err = bitmap_load(mddev);
6375                         } else
6376                                 err = PTR_ERR(bitmap);
6377                 }
6378                 if (fd < 0 || err) {
6379                         bitmap_destroy(mddev);
6380                         fd = -1; /* make sure to put the file */
6381                 }
6382                 mddev->pers->quiesce(mddev, 0);
6383         }
6384         if (fd < 0) {
6385                 struct file *f = mddev->bitmap_info.file;
6386                 if (f) {
6387                         spin_lock(&mddev->lock);
6388                         mddev->bitmap_info.file = NULL;
6389                         spin_unlock(&mddev->lock);
6390                         fput(f);
6391                 }
6392         }
6393
6394         return err;
6395 }
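
/*
 * Illustrative user-space sketch (paths assumed, md_fd is an already-open
 * md device node): SET_BITMAP_FILE takes a plain file descriptor as its
 * argument, and a negative value removes the current bitmap.
 *
 *	int bfd = open("/var/lib/md0-bitmap", O_RDWR);
 *
 *	if (bfd >= 0)
 *		ioctl(md_fd, SET_BITMAP_FILE, bfd);	// attach external bitmap
 *	...
 *	ioctl(md_fd, SET_BITMAP_FILE, -1);		// drop the bitmap again
 */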
6396
6397 /*
6398  * set_array_info is used in two different ways.
6399  * The original usage is when creating a new array.
6400  * In this usage, raid_disks is > 0 and it, together with
6401  *  level, size, not_persistent, layout and chunksize, determines the
6402  *  shape of the array.
6403  *  This will always create an array with a type-0.90.0 superblock.
6404  * The newer usage is when assembling an array.
6405  *  In this case raid_disks will be 0, and the major_version field is
6406  *  used to determine which style of superblock is to be found on the devices.
6407  *  The minor and patch _version numbers are also kept in case the
6408  *  super_block handler wishes to interpret them.
6409  */
6410 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
6411 {
6412
6413         if (info->raid_disks == 0) {
6414                 /* just setting version number for superblock loading */
6415                 if (info->major_version < 0 ||
6416                     info->major_version >= ARRAY_SIZE(super_types) ||
6417                     super_types[info->major_version].name == NULL) {
6418                         /* maybe try to auto-load a module? */
6419                         printk(KERN_INFO
6420                                 "md: superblock version %d not known\n",
6421                                 info->major_version);
6422                         return -EINVAL;
6423                 }
6424                 mddev->major_version = info->major_version;
6425                 mddev->minor_version = info->minor_version;
6426                 mddev->patch_version = info->patch_version;
6427                 mddev->persistent = !info->not_persistent;
6428                 /* ensure mddev_put doesn't delete this now that there
6429                  * is some minimal configuration.
6430                  */
6431                 mddev->ctime         = ktime_get_real_seconds();
6432                 return 0;
6433         }
6434         mddev->major_version = MD_MAJOR_VERSION;
6435         mddev->minor_version = MD_MINOR_VERSION;
6436         mddev->patch_version = MD_PATCHLEVEL_VERSION;
6437         mddev->ctime         = ktime_get_real_seconds();
6438
6439         mddev->level         = info->level;
6440         mddev->clevel[0]     = 0;
6441         mddev->dev_sectors   = 2 * (sector_t)info->size;
6442         mddev->raid_disks    = info->raid_disks;
6443         /* don't set md_minor, it is determined by which /dev/md* was
6444          * opened
6445          */
6446         if (info->state & (1<<MD_SB_CLEAN))
6447                 mddev->recovery_cp = MaxSector;
6448         else
6449                 mddev->recovery_cp = 0;
6450         mddev->persistent    = ! info->not_persistent;
6451         mddev->external      = 0;
6452
6453         mddev->layout        = info->layout;
6454         mddev->chunk_sectors = info->chunk_size >> 9;
6455
6456         mddev->max_disks     = MD_SB_DISKS;
6457
6458         if (mddev->persistent)
6459                 mddev->flags         = 0;
6460         set_bit(MD_CHANGE_DEVS, &mddev->flags);
6461
6462         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
6463         mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
6464         mddev->bitmap_info.offset = 0;
6465
6466         mddev->reshape_position = MaxSector;
6467
6468         /*
6469          * Generate a 128 bit UUID
6470          */
6471         get_random_bytes(mddev->uuid, 16);
6472
6473         mddev->new_level = mddev->level;
6474         mddev->new_chunk_sectors = mddev->chunk_sectors;
6475         mddev->new_layout = mddev->layout;
6476         mddev->delta_disks = 0;
6477         mddev->reshape_backwards = 0;
6478
6479         return 0;
6480 }
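
/*
 * Illustrative user-space sketch of the two usages described in the
 * comment above set_array_info(), both issued through the SET_ARRAY_INFO
 * ioctl handled in md_ioctl(); field values are examples only.
 *
 *	mdu_array_info_t info = { 0 };
 *
 *	// assembling an existing array: just announce the superblock format
 *	info.major_version = 1;
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 *
 *	// ... or creating a new 0.90 array: raid_disks > 0 plus its shape
 *	memset(&info, 0, sizeof(info));
 *	info.level = 1;
 *	info.raid_disks = 2;
 *	info.size = 1048576;	// per-device size in KiB
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 */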
6481
6482 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
6483 {
6484         WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
6485
6486         if (mddev->external_size)
6487                 return;
6488
6489         mddev->array_sectors = array_sectors;
6490 }
6491 EXPORT_SYMBOL(md_set_array_sectors);
6492
6493 static int update_size(struct mddev *mddev, sector_t num_sectors)
6494 {
6495         struct md_rdev *rdev;
6496         int rv;
6497         int fit = (num_sectors == 0);
6498
6499         /* cluster raid doesn't support update size */
6500         if (mddev_is_clustered(mddev))
6501                 return -EINVAL;
6502
6503         if (mddev->pers->resize == NULL)
6504                 return -EINVAL;
6505         /* The "num_sectors" is the number of sectors of each device that
6506          * is used.  This can only make sense for arrays with redundancy.
6507          * linear and raid0 always use whatever space is available. We can only
6508          * consider changing this number if no resync or reconstruction is
6509          * happening, and if the new size is acceptable. It must fit before the
6510          * sb_start or, if that is <data_offset, it must fit before the size
6511          * of each device.  If num_sectors is zero, we find the largest size
6512          * that fits.
6513          */
6514         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6515             mddev->sync_thread)
6516                 return -EBUSY;
6517         if (mddev->ro)
6518                 return -EROFS;
6519
6520         rdev_for_each(rdev, mddev) {
6521                 sector_t avail = rdev->sectors;
6522
6523                 if (fit && (num_sectors == 0 || num_sectors > avail))
6524                         num_sectors = avail;
6525                 if (avail < num_sectors)
6526                         return -ENOSPC;
6527         }
6528         rv = mddev->pers->resize(mddev, num_sectors);
6529         if (!rv)
6530                 revalidate_disk(mddev->gendisk);
6531         return rv;
6532 }
6533
6534 static int update_raid_disks(struct mddev *mddev, int raid_disks)
6535 {
6536         int rv;
6537         struct md_rdev *rdev;
6538         /* change the number of raid disks */
6539         if (mddev->pers->check_reshape == NULL)
6540                 return -EINVAL;
6541         if (mddev->ro)
6542                 return -EROFS;
6543         if (raid_disks <= 0 ||
6544             (mddev->max_disks && raid_disks >= mddev->max_disks))
6545                 return -EINVAL;
6546         if (mddev->sync_thread ||
6547             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6548             mddev->reshape_position != MaxSector)
6549                 return -EBUSY;
6550
6551         rdev_for_each(rdev, mddev) {
6552                 if (mddev->raid_disks < raid_disks &&
6553                     rdev->data_offset < rdev->new_data_offset)
6554                         return -EINVAL;
6555                 if (mddev->raid_disks > raid_disks &&
6556                     rdev->data_offset > rdev->new_data_offset)
6557                         return -EINVAL;
6558         }
6559
6560         mddev->delta_disks = raid_disks - mddev->raid_disks;
6561         if (mddev->delta_disks < 0)
6562                 mddev->reshape_backwards = 1;
6563         else if (mddev->delta_disks > 0)
6564                 mddev->reshape_backwards = 0;
6565
6566         rv = mddev->pers->check_reshape(mddev);
6567         if (rv < 0) {
6568                 mddev->delta_disks = 0;
6569                 mddev->reshape_backwards = 0;
6570         }
6571         return rv;
6572 }
6573
6574 /*
6575  * update_array_info is used to change the configuration of an
6576  * on-line array.
6577  * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
6578  * fields in the info are checked against the array.
6579  * Any differences that cannot be handled will cause an error.
6580  * Normally, only one change can be managed at a time.
6581  */
6582 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
6583 {
6584         int rv = 0;
6585         int cnt = 0;
6586         int state = 0;
6587
6588         /* calculate expected state, ignoring low bits */
6589         if (mddev->bitmap && mddev->bitmap_info.offset)
6590                 state |= (1 << MD_SB_BITMAP_PRESENT);
6591
6592         if (mddev->major_version != info->major_version ||
6593             mddev->minor_version != info->minor_version ||
6594 /*          mddev->patch_version != info->patch_version || */
6595             mddev->ctime         != info->ctime         ||
6596             mddev->level         != info->level         ||
6597 /*          mddev->layout        != info->layout        || */
6598             mddev->persistent    != !info->not_persistent ||
6599             mddev->chunk_sectors != info->chunk_size >> 9 ||
6600             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
6601             ((state^info->state) & 0xfffffe00)
6602                 )
6603                 return -EINVAL;
6604         /* Check there is only one change */
6605         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6606                 cnt++;
6607         if (mddev->raid_disks != info->raid_disks)
6608                 cnt++;
6609         if (mddev->layout != info->layout)
6610                 cnt++;
6611         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
6612                 cnt++;
6613         if (cnt == 0)
6614                 return 0;
6615         if (cnt > 1)
6616                 return -EINVAL;
6617
6618         if (mddev->layout != info->layout) {
6619                 /* Change layout
6620                  * we don't need to do anything at the md level, the
6621                  * personality will take care of it all.
6622                  */
6623                 if (mddev->pers->check_reshape == NULL)
6624                         return -EINVAL;
6625                 else {
6626                         mddev->new_layout = info->layout;
6627                         rv = mddev->pers->check_reshape(mddev);
6628                         if (rv)
6629                                 mddev->new_layout = mddev->layout;
6630                         return rv;
6631                 }
6632         }
6633         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6634                 rv = update_size(mddev, (sector_t)info->size * 2);
6635
6636         if (mddev->raid_disks    != info->raid_disks)
6637                 rv = update_raid_disks(mddev, info->raid_disks);
6638
6639         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
6640                 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
6641                         rv = -EINVAL;
6642                         goto err;
6643                 }
6644                 if (mddev->recovery || mddev->sync_thread) {
6645                         rv = -EBUSY;
6646                         goto err;
6647                 }
6648                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
6649                         struct bitmap *bitmap;
6650                         /* add the bitmap */
6651                         if (mddev->bitmap) {
6652                                 rv = -EEXIST;
6653                                 goto err;
6654                         }
6655                         if (mddev->bitmap_info.default_offset == 0) {
6656                                 rv = -EINVAL;
6657                                 goto err;
6658                         }
6659                         mddev->bitmap_info.offset =
6660                                 mddev->bitmap_info.default_offset;
6661                         mddev->bitmap_info.space =
6662                                 mddev->bitmap_info.default_space;
6663                         mddev->pers->quiesce(mddev, 1);
6664                         bitmap = bitmap_create(mddev, -1);
6665                         if (!IS_ERR(bitmap)) {
6666                                 mddev->bitmap = bitmap;
6667                                 rv = bitmap_load(mddev);
6668                         } else
6669                                 rv = PTR_ERR(bitmap);
6670                         if (rv)
6671                                 bitmap_destroy(mddev);
6672                         mddev->pers->quiesce(mddev, 0);
6673                 } else {
6674                         /* remove the bitmap */
6675                         if (!mddev->bitmap) {
6676                                 rv = -ENOENT;
6677                                 goto err;
6678                         }
6679                         if (mddev->bitmap->storage.file) {
6680                                 rv = -EINVAL;
6681                                 goto err;
6682                         }
6683                         if (mddev->bitmap_info.nodes) {
6684                                 /* hold PW on all the bitmap lock */
6685                                 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
6686                                         printk("md: can't change bitmap to none since the"
6687                                                " array is in use by more than one node\n");
6688                                         rv = -EPERM;
6689                                         md_cluster_ops->unlock_all_bitmaps(mddev);
6690                                         goto err;
6691                                 }
6692
6693                                 mddev->bitmap_info.nodes = 0;
6694                                 md_cluster_ops->leave(mddev);
6695                         }
6696                         mddev->pers->quiesce(mddev, 1);
6697                         bitmap_destroy(mddev);
6698                         mddev->pers->quiesce(mddev, 0);
6699                         mddev->bitmap_info.offset = 0;
6700                 }
6701         }
6702         md_update_sb(mddev, 1);
6703         return rv;
6704 err:
6705         return rv;
6706 }
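
/*
 * Illustrative user-space sketch: because update_array_info() accepts
 * only one change per call, reconfiguration is done read-modify-write,
 * one field at a time (values are examples only).
 *
 *	mdu_array_info_t info;
 *
 *	ioctl(fd, GET_ARRAY_INFO, &info);
 *	info.raid_disks += 1;			// grow by one disk...
 *	ioctl(fd, SET_ARRAY_INFO, &info);	// ...changing nothing else
 */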
6707
6708 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
6709 {
6710         struct md_rdev *rdev;
6711         int err = 0;
6712
6713         if (mddev->pers == NULL)
6714                 return -ENODEV;
6715
6716         rcu_read_lock();
6717         rdev = find_rdev_rcu(mddev, dev);
6718         if (!rdev)
6719                 err =  -ENODEV;
6720         else {
6721                 md_error(mddev, rdev);
6722                 if (!test_bit(Faulty, &rdev->flags))
6723                         err = -EBUSY;
6724         }
6725         rcu_read_unlock();
6726         return err;
6727 }
6728
6729 /*
6730  * We have a problem here: there is no easy way to give a CHS
6731  * virtual geometry. We currently pretend that we have a 2 head,
6732  * 4 sector geometry (with a BIG number of cylinders...). This drives
6733  * dosfs just mad... ;-)
6734  */
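/*
 * With 2 heads and 4 sectors per track, one cylinder covers 2 * 4 = 8
 * sectors, so cylinders = array_sectors / 8 below makes the faked CHS
 * geometry span the whole array (to within the last few sectors lost
 * to truncation).
 */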
6735 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
6736 {
6737         struct mddev *mddev = bdev->bd_disk->private_data;
6738
6739         geo->heads = 2;
6740         geo->sectors = 4;
6741         geo->cylinders = mddev->array_sectors / 8;
6742         return 0;
6743 }
6744
6745 static inline bool md_ioctl_valid(unsigned int cmd)
6746 {
6747         switch (cmd) {
6748         case ADD_NEW_DISK:
6749         case BLKROSET:
6750         case GET_ARRAY_INFO:
6751         case GET_BITMAP_FILE:
6752         case GET_DISK_INFO:
6753         case HOT_ADD_DISK:
6754         case HOT_REMOVE_DISK:
6755         case RAID_AUTORUN:
6756         case RAID_VERSION:
6757         case RESTART_ARRAY_RW:
6758         case RUN_ARRAY:
6759         case SET_ARRAY_INFO:
6760         case SET_BITMAP_FILE:
6761         case SET_DISK_FAULTY:
6762         case STOP_ARRAY:
6763         case STOP_ARRAY_RO:
6764         case CLUSTERED_DISK_NACK:
6765                 return true;
6766         default:
6767                 return false;
6768         }
6769 }
6770
6771 static int md_ioctl(struct block_device *bdev, fmode_t mode,
6772                         unsigned int cmd, unsigned long arg)
6773 {
6774         int err = 0;
6775         void __user *argp = (void __user *)arg;
6776         struct mddev *mddev = NULL;
6777         int ro;
6778         bool did_set_md_closing = false;
6779
6780         if (!md_ioctl_valid(cmd))
6781                 return -ENOTTY;
6782
6783         switch (cmd) {
6784         case RAID_VERSION:
6785         case GET_ARRAY_INFO:
6786         case GET_DISK_INFO:
6787                 break;
6788         default:
6789                 if (!capable(CAP_SYS_ADMIN))
6790                         return -EACCES;
6791         }
6792
6793         /*
6794          * Commands dealing with the RAID driver but not any
6795          * particular array:
6796          */
6797         switch (cmd) {
6798         case RAID_VERSION:
6799                 err = get_version(argp);
6800                 goto out;
6801
6802 #ifndef MODULE
6803         case RAID_AUTORUN:
6804                 err = 0;
6805                 autostart_arrays(arg);
6806                 goto out;
6807 #endif
6808         default:;
6809         }
6810
6811         /*
6812          * Commands creating/starting a new array:
6813          */
6814
6815         mddev = bdev->bd_disk->private_data;
6816
6817         if (!mddev) {
6818                 BUG();
6819                 goto out;
6820         }
6821
6822         /* Some actions do not require the mutex */
6823         switch (cmd) {
6824         case GET_ARRAY_INFO:
6825                 if (!mddev->raid_disks && !mddev->external)
6826                         err = -ENODEV;
6827                 else
6828                         err = get_array_info(mddev, argp);
6829                 goto out;
6830
6831         case GET_DISK_INFO:
6832                 if (!mddev->raid_disks && !mddev->external)
6833                         err = -ENODEV;
6834                 else
6835                         err = get_disk_info(mddev, argp);
6836                 goto out;
6837
6838         case SET_DISK_FAULTY:
6839                 err = set_disk_faulty(mddev, new_decode_dev(arg));
6840                 goto out;
6841
6842         case GET_BITMAP_FILE:
6843                 err = get_bitmap_file(mddev, argp);
6844                 goto out;
6845
6846         }
6847
6848         if (cmd == ADD_NEW_DISK)
6849                 /* need to ensure md_delayed_delete() has completed */
6850                 flush_workqueue(md_misc_wq);
6851
6852         if (cmd == HOT_REMOVE_DISK)
6853                 /* need to ensure recovery thread has run */
6854                 wait_event_interruptible_timeout(mddev->sb_wait,
6855                                                  !test_bit(MD_RECOVERY_NEEDED,
6856                                                            &mddev->recovery),
6857                                                  msecs_to_jiffies(5000));
6858         if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
6859                 /* Need to flush page cache, and ensure no-one else opens
6860                  * and writes
6861                  */
6862                 mutex_lock(&mddev->open_mutex);
6863                 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
6864                         mutex_unlock(&mddev->open_mutex);
6865                         err = -EBUSY;
6866                         goto out;
6867                 }
6868                 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
6869                         mutex_unlock(&mddev->open_mutex);
6870                         err = -EBUSY;
6871                         goto out;
6872                 }
6873                 did_set_md_closing = true;
6874                 mutex_unlock(&mddev->open_mutex);
6875                 sync_blockdev(bdev);
6876         }
6877         err = mddev_lock(mddev);
6878         if (err) {
6879                 printk(KERN_INFO
6880                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
6881                         err, cmd);
6882                 goto out;
6883         }
6884
6885         if (cmd == SET_ARRAY_INFO) {
6886                 mdu_array_info_t info;
6887                 if (!arg)
6888                         memset(&info, 0, sizeof(info));
6889                 else if (copy_from_user(&info, argp, sizeof(info))) {
6890                         err = -EFAULT;
6891                         goto unlock;
6892                 }
6893                 if (mddev->pers) {
6894                         err = update_array_info(mddev, &info);
6895                         if (err) {
6896                                 printk(KERN_WARNING "md: couldn't update"
6897                                        " array info. %d\n", err);
6898                                 goto unlock;
6899                         }
6900                         goto unlock;
6901                 }
6902                 if (!list_empty(&mddev->disks)) {
6903                         printk(KERN_WARNING
6904                                "md: array %s already has disks!\n",
6905                                mdname(mddev));
6906                         err = -EBUSY;
6907                         goto unlock;
6908                 }
6909                 if (mddev->raid_disks) {
6910                         printk(KERN_WARNING
6911                                "md: array %s already initialised!\n",
6912                                mdname(mddev));
6913                         err = -EBUSY;
6914                         goto unlock;
6915                 }
6916                 err = set_array_info(mddev, &info);
6917                 if (err) {
6918                         printk(KERN_WARNING "md: couldn't set"
6919                                " array info. %d\n", err);
6920                         goto unlock;
6921                 }
6922                 goto unlock;
6923         }
6924
6925         /*
6926          * Commands querying/configuring an existing array:
6927          */
6928         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
6929          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
6930         if ((!mddev->raid_disks && !mddev->external)
6931             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
6932             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
6933             && cmd != GET_BITMAP_FILE) {
6934                 err = -ENODEV;
6935                 goto unlock;
6936         }
6937
6938         /*
6939          * Commands even a read-only array can execute:
6940          */
6941         switch (cmd) {
6942         case RESTART_ARRAY_RW:
6943                 err = restart_array(mddev);
6944                 goto unlock;
6945
6946         case STOP_ARRAY:
6947                 err = do_md_stop(mddev, 0, bdev);
6948                 goto unlock;
6949
6950         case STOP_ARRAY_RO:
6951                 err = md_set_readonly(mddev, bdev);
6952                 goto unlock;
6953
6954         case HOT_REMOVE_DISK:
6955                 err = hot_remove_disk(mddev, new_decode_dev(arg));
6956                 goto unlock;
6957
6958         case ADD_NEW_DISK:
6959                 /* We can support ADD_NEW_DISK on read-only arrays
6960                  * only if we are re-adding a preexisting device.
6961                  * So require mddev->pers and MD_DISK_SYNC.
6962                  */
6963                 if (mddev->pers) {
6964                         mdu_disk_info_t info;
6965                         if (copy_from_user(&info, argp, sizeof(info)))
6966                                 err = -EFAULT;
6967                         else if (!(info.state & (1<<MD_DISK_SYNC)))
6968                                 /* Need to clear read-only for this */
6969                                 break;
6970                         else
6971                                 err = add_new_disk(mddev, &info);
6972                         goto unlock;
6973                 }
6974                 break;
6975
6976         case BLKROSET:
6977                 if (get_user(ro, (int __user *)(arg))) {
6978                         err = -EFAULT;
6979                         goto unlock;
6980                 }
6981                 err = -EINVAL;
6982
6983                 /* if the bdev is going readonly the value of mddev->ro
6984                  * does not matter, no writes are coming
6985                  */
6986                 if (ro)
6987                         goto unlock;
6988
6989                 /* are we already prepared for writes? */
6990                 if (mddev->ro != 1)
6991                         goto unlock;
6992
6993                 /* transitioning to readauto need only happen for
6994                  * arrays that call md_write_start
6995                  */
6996                 if (mddev->pers) {
6997                         err = restart_array(mddev);
6998                         if (err == 0) {
6999                                 mddev->ro = 2;
7000                                 set_disk_ro(mddev->gendisk, 0);
7001                         }
7002                 }
7003                 goto unlock;
7004         }
7005
7006         /*
7007          * The remaining ioctls are changing the state of the
7008          * superblock, so we do not allow them on read-only arrays.
7009          */
7010         if (mddev->ro && mddev->pers) {
7011                 if (mddev->ro == 2) {
7012                         mddev->ro = 0;
7013                         sysfs_notify_dirent_safe(mddev->sysfs_state);
7014                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7015                         /* mddev_unlock will wake thread */
7016                         /* If a device failed while we were read-only, we
7017                          * need to make sure the metadata is updated now.
7018                          */
7019                         if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
7020                                 mddev_unlock(mddev);
7021                                 wait_event(mddev->sb_wait,
7022                                            !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
7023                                            !test_bit(MD_CHANGE_PENDING, &mddev->flags));
7024                                 mddev_lock_nointr(mddev);
7025                         }
7026                 } else {
7027                         err = -EROFS;
7028                         goto unlock;
7029                 }
7030         }
7031
7032         switch (cmd) {
7033         case ADD_NEW_DISK:
7034         {
7035                 mdu_disk_info_t info;
7036                 if (copy_from_user(&info, argp, sizeof(info)))
7037                         err = -EFAULT;
7038                 else
7039                         err = add_new_disk(mddev, &info);
7040                 goto unlock;
7041         }
7042
7043         case CLUSTERED_DISK_NACK:
7044                 if (mddev_is_clustered(mddev))
7045                         md_cluster_ops->new_disk_ack(mddev, false);
7046                 else
7047                         err = -EINVAL;
7048                 goto unlock;
7049
7050         case HOT_ADD_DISK:
7051                 err = hot_add_disk(mddev, new_decode_dev(arg));
7052                 goto unlock;
7053
7054         case RUN_ARRAY:
7055                 err = do_md_run(mddev);
7056                 goto unlock;
7057
7058         case SET_BITMAP_FILE:
7059                 err = set_bitmap_file(mddev, (int)arg);
7060                 goto unlock;
7061
7062         default:
7063                 err = -EINVAL;
7064                 goto unlock;
7065         }
7066
7067 unlock:
7068         if (mddev->hold_active == UNTIL_IOCTL &&
7069             err != -EINVAL)
7070                 mddev->hold_active = 0;
7071         mddev_unlock(mddev);
7072 out:
7073         if(did_set_md_closing)
7074                 clear_bit(MD_CLOSING, &mddev->flags);
7075         return err;
7076 }
7077 #ifdef CONFIG_COMPAT
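/*
 * 32-bit user space passes ioctl pointer arguments that must be widened
 * with compat_ptr(); commands whose argument is a plain integer (e.g. an
 * encoded device number or a file descriptor) are forwarded to md_ioctl()
 * unchanged.
 */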
7078 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7079                     unsigned int cmd, unsigned long arg)
7080 {
7081         switch (cmd) {
7082         case HOT_REMOVE_DISK:
7083         case HOT_ADD_DISK:
7084         case SET_DISK_FAULTY:
7085         case SET_BITMAP_FILE:
7086                 /* These take an integer arg, do not convert */
7087                 break;
7088         default:
7089                 arg = (unsigned long)compat_ptr(arg);
7090                 break;
7091         }
7092
7093         return md_ioctl(bdev, mode, cmd, arg);
7094 }
7095 #endif /* CONFIG_COMPAT */
7096
7097 static int md_open(struct block_device *bdev, fmode_t mode)
7098 {
7099         /*
7100          * Succeed if we can lock the mddev, which confirms that
7101          * it isn't being stopped right now.
7102          */
7103         struct mddev *mddev = mddev_find(bdev->bd_dev);
7104         int err;
7105
7106         if (!mddev)
7107                 return -ENODEV;
7108
7109         if (mddev->gendisk != bdev->bd_disk) {
7110                 /* we are racing with mddev_put which is discarding this
7111                  * bd_disk.
7112                  */
7113                 mddev_put(mddev);
7114                 /* Wait until bdev->bd_disk is definitely gone */
7115                 if (work_pending(&mddev->del_work))
7116                         flush_workqueue(md_misc_wq);
7117                 return -EBUSY;
7118         }
7119         BUG_ON(mddev != bdev->bd_disk->private_data);
7120
7121         if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
7122                 goto out;
7123
7124         if (test_bit(MD_CLOSING, &mddev->flags)) {
7125                 mutex_unlock(&mddev->open_mutex);
7126                 err = -ENODEV;
7127                 goto out;
7128         }
7129
7130         err = 0;
7131         atomic_inc(&mddev->openers);
7132         mutex_unlock(&mddev->open_mutex);
7133
7134         check_disk_change(bdev);
7135  out:
7136         if (err)
7137                 mddev_put(mddev);
7138         return err;
7139 }
7140
7141 static void md_release(struct gendisk *disk, fmode_t mode)
7142 {
7143         struct mddev *mddev = disk->private_data;
7144
7145         BUG_ON(!mddev);
7146         atomic_dec(&mddev->openers);
7147         mddev_put(mddev);
7148 }
7149
7150 static int md_media_changed(struct gendisk *disk)
7151 {
7152         struct mddev *mddev = disk->private_data;
7153
7154         return mddev->changed;
7155 }
7156
7157 static int md_revalidate(struct gendisk *disk)
7158 {
7159         struct mddev *mddev = disk->private_data;
7160
7161         mddev->changed = 0;
7162         return 0;
7163 }
7164 static const struct block_device_operations md_fops =
7165 {
7166         .owner          = THIS_MODULE,
7167         .open           = md_open,
7168         .release        = md_release,
7169         .ioctl          = md_ioctl,
7170 #ifdef CONFIG_COMPAT
7171         .compat_ioctl   = md_compat_ioctl,
7172 #endif
7173         .getgeo         = md_getgeo,
7174         .media_changed  = md_media_changed,
7175         .revalidate_disk= md_revalidate,
7176 };
7177
7178 static int md_thread(void *arg)
7179 {
7180         struct md_thread *thread = arg;
7181
7182         /*
7183          * md_thread is a 'system-thread': its priority should be very
7184          * high. We avoid resource deadlocks individually in each
7185          * raid personality. (RAID5 does preallocation) We also use RR and
7186          * the very same RT priority as kswapd, thus we will never get
7187          * into a priority inversion deadlock.
7188          *
7189          * we definitely have to have equal or higher priority than
7190          * bdflush, otherwise bdflush will deadlock if there are too
7191          * many dirty RAID5 blocks.
7192          */
7193
7194         allow_signal(SIGKILL);
7195         while (!kthread_should_stop()) {
7196
7197                 /* We need to wait INTERRUPTIBLE so that
7198                  * we don't add to the load-average.
7199                  * That means we need to be sure no signals are
7200                  * pending
7201                  */
7202                 if (signal_pending(current))
7203                         flush_signals(current);
7204
7205                 wait_event_interruptible_timeout
7206                         (thread->wqueue,
7207                          test_bit(THREAD_WAKEUP, &thread->flags)
7208                          || kthread_should_stop(),
7209                          thread->timeout);
7210
7211                 clear_bit(THREAD_WAKEUP, &thread->flags);
7212                 if (!kthread_should_stop())
7213                         thread->run(thread);
7214         }
7215
7216         return 0;
7217 }
7218
7219 void md_wakeup_thread(struct md_thread *thread)
7220 {
7221         if (thread) {
7222                 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
7223                 set_bit(THREAD_WAKEUP, &thread->flags);
7224                 wake_up(&thread->wqueue);
7225         }
7226 }
7227 EXPORT_SYMBOL(md_wakeup_thread);
7228
7229 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7230                 struct mddev *mddev, const char *name)
7231 {
7232         struct md_thread *thread;
7233
7234         thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
7235         if (!thread)
7236                 return NULL;
7237
7238         init_waitqueue_head(&thread->wqueue);
7239
7240         thread->run = run;
7241         thread->mddev = mddev;
7242         thread->timeout = MAX_SCHEDULE_TIMEOUT;
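        /* The kernel thread name is "<mdname>_<name>", so it shows up in
         * ps/top as e.g. "md0_resync" or "md0_raid1" (illustrative names).
         */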
7243         thread->tsk = kthread_run(md_thread, thread,
7244                                   "%s_%s",
7245                                   mdname(thread->mddev),
7246                                   name);
7247         if (IS_ERR(thread->tsk)) {
7248                 kfree(thread);
7249                 return NULL;
7250         }
7251         return thread;
7252 }
7253 EXPORT_SYMBOL(md_register_thread);
7254
7255 void md_unregister_thread(struct md_thread **threadp)
7256 {
7257         struct md_thread *thread;
7258
7259         /*
7260          * Locking ensures that mddev_unlock does not wake_up a
7261          * non-existent thread
7262          */
7263         spin_lock(&pers_lock);
7264         thread = *threadp;
7265         if (!thread) {
7266                 spin_unlock(&pers_lock);
7267                 return;
7268         }
7269         *threadp = NULL;
7270         spin_unlock(&pers_lock);
7271
7272         pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
7273         kthread_stop(thread->tsk);
7274         kfree(thread);
7275 }
7276 EXPORT_SYMBOL(md_unregister_thread);
7277
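/*
 * md_error() is the common way a personality reports that I/O to a member
 * device has failed: the rdev is handed to the personality's error_handler,
 * recovery is flagged and the per-array thread is woken.  Illustrative
 * caller sketch (names are hypothetical):
 *
 *	if (write_to_member_failed)
 *		md_error(conf->mddev, rdev);
 */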
7278 void md_error(struct mddev *mddev, struct md_rdev *rdev)
7279 {
7280         if (!rdev || test_bit(Faulty, &rdev->flags))
7281                 return;
7282
7283         if (!mddev->pers || !mddev->pers->error_handler)
7284                 return;
7285         mddev->pers->error_handler(mddev,rdev);
7286         if (mddev->degraded)
7287                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7288         sysfs_notify_dirent_safe(rdev->sysfs_state);
7289         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7290         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7291         md_wakeup_thread(mddev->thread);
7292         if (mddev->event_work.func)
7293                 queue_work(md_misc_wq, &mddev->event_work);
7294         md_new_event(mddev);
7295 }
7296 EXPORT_SYMBOL(md_error);
7297
7298 /* seq_file implementation /proc/mdstat */
7299
7300 static void status_unused(struct seq_file *seq)
7301 {
7302         int i = 0;
7303         struct md_rdev *rdev;
7304
7305         seq_printf(seq, "unused devices: ");
7306
7307         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
7308                 char b[BDEVNAME_SIZE];
7309                 i++;
7310                 seq_printf(seq, "%s ",
7311                               bdevname(rdev->bdev,b));
7312         }
7313         if (!i)
7314                 seq_printf(seq, "<none>");
7315
7316         seq_printf(seq, "\n");
7317 }
7318
7319 static int status_resync(struct seq_file *seq, struct mddev *mddev)
7320 {
7321         sector_t max_sectors, resync, res;
7322         unsigned long dt, db = 0;
7323         sector_t rt, curr_mark_cnt, resync_mark_cnt;
7324         int scale, recovery_active;
7325         unsigned int per_milli;
7326
7327         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7328             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7329                 max_sectors = mddev->resync_max_sectors;
7330         else
7331                 max_sectors = mddev->dev_sectors;
7332
7333         resync = mddev->curr_resync;
7334         if (resync <= 3) {
7335                 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7336                         /* Still cleaning up */
7337                         resync = max_sectors;
7338         } else
7339                 resync -= atomic_read(&mddev->recovery_active);
7340
7341         if (resync == 0) {
7342                 if (mddev->recovery_cp < MaxSector) {
7343                         seq_printf(seq, "\tresync=PENDING");
7344                         return 1;
7345                 }
7346                 return 0;
7347         }
7348         if (resync < 3) {
7349                 seq_printf(seq, "\tresync=DELAYED");
7350                 return 1;
7351         }
7352
7353         WARN_ON(max_sectors == 0);
7354         /* Pick 'scale' such that (resync>>scale)*1000 will fit
7355          * in a sector_t, and (max_sectors>>scale) will fit in a
7356          * u32, as those are the requirements for sector_div.
7357          * Thus 'scale' must be at least 10
7358          */
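        /* Illustrative: on a 32-bit kernel with a 64-bit sector_t and
         * max_sectors = 2^44 (an 8 PiB array), the loop below bumps 'scale'
         * from 10 to 11 so that the sector_div() operands stay within range.
         */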
7359         scale = 10;
7360         if (sizeof(sector_t) > sizeof(unsigned long)) {
7361                 while ( max_sectors/2 > (1ULL<<(scale+32)))
7362                         scale++;
7363         }
7364         res = (resync>>scale)*1000;
7365         sector_div(res, (u32)((max_sectors>>scale)+1));
7366
7367         per_milli = res;
7368         {
7369                 int i, x = per_milli/50, y = 20-x;
7370                 seq_printf(seq, "[");
7371                 for (i = 0; i < x; i++)
7372                         seq_printf(seq, "=");
7373                 seq_printf(seq, ">");
7374                 for (i = 0; i < y; i++)
7375                         seq_printf(seq, ".");
7376                 seq_printf(seq, "] ");
7377         }
7378         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
7379                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
7380                     "reshape" :
7381                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
7382                      "check" :
7383                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
7384                       "resync" : "recovery"))),
7385                    per_milli/10, per_milli % 10,
7386                    (unsigned long long) resync/2,
7387                    (unsigned long long) max_sectors/2);
7388
7389         /*
7390          * dt: time from mark until now
7391          * db: blocks written from mark until now
7392          * rt: remaining time
7393          *
7394          * rt is a sector_t, which is always 64bit now. We are keeping
7395          * the original algorithm, but it is not really necessary.
7396          *
7397          * Original algorithm:
7398          *   So we divide before multiply in case it is 32bit and close
7399          *   to the limit.
7400          *   We scale the divisor (db) by 32 to avoid losing precision
7401          *   near the end of resync when the number of remaining sectors
7402          *   is close to 'db'.
7403          *   We then divide rt by 32 after multiplying by db to compensate.
7404          *   The '+1' avoids division by zero if db is very small.
7405          */
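        /* Worked example (illustrative numbers): if 60000 sectors completed
         * in the dt = 30s since the last mark and 1000000 sectors remain,
         * the code below reports speed = 60000/2/30 = 1000K/sec and
         * finish ~= 1000000 * 30 / 60000 ~= 500s, i.e. about 8.3min.
         */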
7406         dt = ((jiffies - mddev->resync_mark) / HZ);
7407         if (!dt) dt++;
7408
7409         curr_mark_cnt = mddev->curr_mark_cnt;
7410         recovery_active = atomic_read(&mddev->recovery_active);
7411         resync_mark_cnt = mddev->resync_mark_cnt;
7412
7413         if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
7414                 db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
7415
7416         rt = max_sectors - resync;    /* number of remaining sectors */
7417         rt = div64_u64(rt, db/32+1);
7418         rt *= dt;
7419         rt >>= 5;
7420
7421         seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
7422                    ((unsigned long)rt % 60)/6);
7423
7424         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
7425         return 1;
7426 }
7427
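/*
 * The /proc/mdstat iterator uses two sentinel "positions": (void*)1 is the
 * header (the "Personalities :" line) and (void*)2 is the trailer (the
 * "unused devices:" line); everything in between is a real mddev, which is
 * reference-counted across the ->next calls.
 */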
7428 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
7429 {
7430         struct list_head *tmp;
7431         loff_t l = *pos;
7432         struct mddev *mddev;
7433
7434         if (l >= 0x10000)
7435                 return NULL;
7436         if (!l--)
7437                 /* header */
7438                 return (void*)1;
7439
7440         spin_lock(&all_mddevs_lock);
7441         list_for_each(tmp,&all_mddevs)
7442                 if (!l--) {
7443                         mddev = list_entry(tmp, struct mddev, all_mddevs);
7444                         mddev_get(mddev);
7445                         spin_unlock(&all_mddevs_lock);
7446                         return mddev;
7447                 }
7448         spin_unlock(&all_mddevs_lock);
7449         if (!l--)
7450                 return (void*)2;/* tail */
7451         return NULL;
7452 }
7453
7454 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
7455 {
7456         struct list_head *tmp;
7457         struct mddev *next_mddev, *mddev = v;
7458
7459         ++*pos;
7460         if (v == (void*)2)
7461                 return NULL;
7462
7463         spin_lock(&all_mddevs_lock);
7464         if (v == (void*)1)
7465                 tmp = all_mddevs.next;
7466         else
7467                 tmp = mddev->all_mddevs.next;
7468         if (tmp != &all_mddevs)
7469                 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
7470         else {
7471                 next_mddev = (void*)2;
7472                 *pos = 0x10000;
7473         }
7474         spin_unlock(&all_mddevs_lock);
7475
7476         if (v != (void*)1)
7477                 mddev_put(mddev);
7478         return next_mddev;
7479
7480 }
7481
7482 static void md_seq_stop(struct seq_file *seq, void *v)
7483 {
7484         struct mddev *mddev = v;
7485
7486         if (mddev && v != (void*)1 && v != (void*)2)
7487                 mddev_put(mddev);
7488 }
7489
7490 static int md_seq_show(struct seq_file *seq, void *v)
7491 {
7492         struct mddev *mddev = v;
7493         sector_t sectors;
7494         struct md_rdev *rdev;
7495
7496         if (v == (void*)1) {
7497                 struct md_personality *pers;
7498                 seq_printf(seq, "Personalities : ");
7499                 spin_lock(&pers_lock);
7500                 list_for_each_entry(pers, &pers_list, list)
7501                         seq_printf(seq, "[%s] ", pers->name);
7502
7503                 spin_unlock(&pers_lock);
7504                 seq_printf(seq, "\n");
7505                 seq->poll_event = atomic_read(&md_event_count);
7506                 return 0;
7507         }
7508         if (v == (void*)2) {
7509                 status_unused(seq);
7510                 return 0;
7511         }
7512
7513         spin_lock(&mddev->lock);
7514         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
7515                 seq_printf(seq, "%s : %sactive", mdname(mddev),
7516                                                 mddev->pers ? "" : "in");
7517                 if (mddev->pers) {
7518                         if (mddev->ro==1)
7519                                 seq_printf(seq, " (read-only)");
7520                         if (mddev->ro==2)
7521                                 seq_printf(seq, " (auto-read-only)");
7522                         seq_printf(seq, " %s", mddev->pers->name);
7523                 }
7524
7525                 sectors = 0;
7526                 rcu_read_lock();
7527                 rdev_for_each_rcu(rdev, mddev) {
7528                         char b[BDEVNAME_SIZE];
7529                         seq_printf(seq, " %s[%d]",
7530                                 bdevname(rdev->bdev,b), rdev->desc_nr);
7531                         if (test_bit(WriteMostly, &rdev->flags))
7532                                 seq_printf(seq, "(W)");
7533                         if (test_bit(Journal, &rdev->flags))
7534                                 seq_printf(seq, "(J)");
7535                         if (test_bit(Faulty, &rdev->flags)) {
7536                                 seq_printf(seq, "(F)");
7537                                 continue;
7538                         }
7539                         if (rdev->raid_disk < 0)
7540                                 seq_printf(seq, "(S)"); /* spare */
7541                         if (test_bit(Replacement, &rdev->flags))
7542                                 seq_printf(seq, "(R)");
7543                         sectors += rdev->sectors;
7544                 }
7545                 rcu_read_unlock();
7546
7547                 if (!list_empty(&mddev->disks)) {
7548                         if (mddev->pers)
7549                                 seq_printf(seq, "\n      %llu blocks",
7550                                            (unsigned long long)
7551                                            mddev->array_sectors / 2);
7552                         else
7553                                 seq_printf(seq, "\n      %llu blocks",
7554                                            (unsigned long long)sectors / 2);
7555                 }
7556                 if (mddev->persistent) {
7557                         if (mddev->major_version != 0 ||
7558                             mddev->minor_version != 90) {
7559                                 seq_printf(seq," super %d.%d",
7560                                            mddev->major_version,
7561                                            mddev->minor_version);
7562                         }
7563                 } else if (mddev->external)
7564                         seq_printf(seq, " super external:%s",
7565                                    mddev->metadata_type);
7566                 else
7567                         seq_printf(seq, " super non-persistent");
7568
7569                 if (mddev->pers) {
7570                         mddev->pers->status(seq, mddev);
7571                         seq_printf(seq, "\n      ");
7572                         if (mddev->pers->sync_request) {
7573                                 if (status_resync(seq, mddev))
7574                                         seq_printf(seq, "\n      ");
7575                         }
7576                 } else
7577                         seq_printf(seq, "\n       ");
7578
7579                 bitmap_status(seq, mddev->bitmap);
7580
7581                 seq_printf(seq, "\n");
7582         }
7583         spin_unlock(&mddev->lock);
7584
7585         return 0;
7586 }
7587
7588 static const struct seq_operations md_seq_ops = {
7589         .start  = md_seq_start,
7590         .next   = md_seq_next,
7591         .stop   = md_seq_stop,
7592         .show   = md_seq_show,
7593 };
7594
7595 static int md_seq_open(struct inode *inode, struct file *file)
7596 {
7597         struct seq_file *seq;
7598         int error;
7599
7600         error = seq_open(file, &md_seq_ops);
7601         if (error)
7602                 return error;
7603
7604         seq = file->private_data;
7605         seq->poll_event = atomic_read(&md_event_count);
7606         return error;
7607 }
7608
7609 static int md_unloading;
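/*
 * Poll support for /proc/mdstat: readers always see POLLIN|POLLRDNORM, and
 * additionally get POLLERR|POLLPRI once md_event_count has moved past the
 * value sampled at open time (refreshed whenever the header line is
 * re-read), i.e. once the array state has changed.
 */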
7610 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
7611 {
7612         struct seq_file *seq = filp->private_data;
7613         int mask;
7614
7615         if (md_unloading)
7616                 return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
7617         poll_wait(filp, &md_event_waiters, wait);
7618
7619         /* always allow read */
7620         mask = POLLIN | POLLRDNORM;
7621
7622         if (seq->poll_event != atomic_read(&md_event_count))
7623                 mask |= POLLERR | POLLPRI;
7624         return mask;
7625 }
7626
7627 static const struct file_operations md_seq_fops = {
7628         .owner          = THIS_MODULE,
7629         .open           = md_seq_open,
7630         .read           = seq_read,
7631         .llseek         = seq_lseek,
7632         .release        = seq_release_private,
7633         .poll           = mdstat_poll,
7634 };
7635
7636 int register_md_personality(struct md_personality *p)
7637 {
7638         printk(KERN_INFO "md: %s personality registered for level %d\n",
7639                                                 p->name, p->level);
7640         spin_lock(&pers_lock);
7641         list_add_tail(&p->list, &pers_list);
7642         spin_unlock(&pers_lock);
7643         return 0;
7644 }
7645 EXPORT_SYMBOL(register_md_personality);
7646
7647 int unregister_md_personality(struct md_personality *p)
7648 {
7649         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
7650         spin_lock(&pers_lock);
7651         list_del_init(&p->list);
7652         spin_unlock(&pers_lock);
7653         return 0;
7654 }
7655 EXPORT_SYMBOL(unregister_md_personality);
7656
7657 int register_md_cluster_operations(struct md_cluster_operations *ops,
7658                                    struct module *module)
7659 {
7660         int ret = 0;
7661         spin_lock(&pers_lock);
7662         if (md_cluster_ops != NULL)
7663                 ret = -EALREADY;
7664         else {
7665                 md_cluster_ops = ops;
7666                 md_cluster_mod = module;
7667         }
7668         spin_unlock(&pers_lock);
7669         return ret;
7670 }
7671 EXPORT_SYMBOL(register_md_cluster_operations);
7672
7673 int unregister_md_cluster_operations(void)
7674 {
7675         spin_lock(&pers_lock);
7676         md_cluster_ops = NULL;
7677         spin_unlock(&pers_lock);
7678         return 0;
7679 }
7680 EXPORT_SYMBOL(unregister_md_cluster_operations);
7681
7682 int md_setup_cluster(struct mddev *mddev, int nodes)
7683 {
7684         if (!md_cluster_ops)
7685                 request_module("md-cluster");
7686         spin_lock(&pers_lock);
7687         /* ensure module won't be unloaded */
7688         if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
7689                 pr_err("can't find md-cluster module or get its reference.\n");
7690                 spin_unlock(&pers_lock);
7691                 return -ENOENT;
7692         }
7693         spin_unlock(&pers_lock);
7694
7695         return md_cluster_ops->join(mddev, nodes);
7696 }
7697
7698 void md_cluster_stop(struct mddev *mddev)
7699 {
7700         if (!md_cluster_ops)
7701                 return;
7702         md_cluster_ops->leave(mddev);
7703         module_put(md_cluster_mod);
7704 }
7705
7706 static int is_mddev_idle(struct mddev *mddev, int init)
7707 {
7708         struct md_rdev *rdev;
7709         int idle;
7710         int curr_events;
7711
7712         idle = 1;
7713         rcu_read_lock();
7714         rdev_for_each_rcu(rdev, mddev) {
7715                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
7716                 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
7717                               (int)part_stat_read(&disk->part0, sectors[1]) -
7718                               atomic_read(&disk->sync_io);
7719                 /* sync IO will cause sync_io to increase before the disk_stats
7720                  * as sync_io is counted when a request starts, and
7721                  * disk_stats is counted when it completes.
7722                  * So resync activity will cause curr_events to be smaller than
7723                  * when there was no such activity.
7724                  * non-sync IO will cause disk_stat to increase without
7725                  * increasing sync_io so curr_events will (eventually)
7726                  * be larger than it was before.  Once it becomes
7727                  * substantially larger, the test below will cause
7728                  * the array to appear non-idle, and resync will slow
7729                  * down.
7730                  * If there is a lot of outstanding resync activity when
7731                  * we set last_events to curr_events, then all that activity
7732                  * completing might cause the array to appear non-idle
7733                  * and resync will be slowed down even though there might
7734                  * not have been non-resync activity.  This will only
7735                  * happen once though.  'last_events' will soon reflect
7736                  * the state where there is little or no outstanding
7737                  * resync requests, and further resync activity will
7738                  * always make curr_events less than last_events.
7739                  *
7740                  */
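                /* Illustrative: if 1000 sectors of ordinary filesystem I/O
                 * complete on this disk while 200 sectors of resync I/O are
                 * still in flight, curr_events moves ~800 ahead of
                 * last_events, exceeding the 64-sector slack below, so the
                 * array is reported busy and the resync throttles itself.
                 */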
7741                 if (init || curr_events - rdev->last_events > 64) {
7742                         rdev->last_events = curr_events;
7743                         idle = 0;
7744                 }
7745         }
7746         rcu_read_unlock();
7747         return idle;
7748 }
7749
7750 void md_done_sync(struct mddev *mddev, int blocks, int ok)
7751 {
7752         /* another "blocks" (512byte) blocks have been synced */
7753         atomic_sub(blocks, &mddev->recovery_active);
7754         wake_up(&mddev->recovery_wait);
7755         if (!ok) {
7756                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7757                 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
7758                 md_wakeup_thread(mddev->thread);
7759                 /* stop recovery, signal do_sync ... */
7760         }
7761 }
7762 EXPORT_SYMBOL(md_done_sync);
7763
7764 /* md_write_start(mddev, bi)
7765  * If we need to update some array metadata (e.g. 'active' flag
7766  * in superblock) before writing, schedule a superblock update
7767  * and wait for it to complete.
7768  */
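/*
 * In outline: a first write to an auto-read-only (ro == 2) array flips it
 * back to read-write, and a write to a clean (in_sync) array clears
 * in_sync and waits on sb_wait until the superblock update has completed,
 * so the on-disk metadata never claims the array is clean while writes
 * are in flight.
 */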
7769 void md_write_start(struct mddev *mddev, struct bio *bi)
7770 {
7771         int did_change = 0;
7772         if (bio_data_dir(bi) != WRITE)
7773                 return;
7774
7775         BUG_ON(mddev->ro == 1);
7776         if (mddev->ro == 2) {
7777                 /* need to switch to read/write */
7778                 mddev->ro = 0;
7779                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7780                 md_wakeup_thread(mddev->thread);
7781                 md_wakeup_thread(mddev->sync_thread);
7782                 did_change = 1;
7783         }
7784         atomic_inc(&mddev->writes_pending);
7785         if (mddev->safemode == 1)
7786                 mddev->safemode = 0;
7787         if (mddev->in_sync) {
7788                 spin_lock(&mddev->lock);
7789                 if (mddev->in_sync) {
7790                         mddev->in_sync = 0;
7791                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7792                         set_bit(MD_CHANGE_PENDING, &mddev->flags);
7793                         md_wakeup_thread(mddev->thread);
7794                         did_change = 1;
7795                 }
7796                 spin_unlock(&mddev->lock);
7797         }
7798         if (did_change)
7799                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7800         wait_event(mddev->sb_wait,
7801                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
7802 }
7803 EXPORT_SYMBOL(md_write_start);
7804
7805 void md_write_end(struct mddev *mddev)
7806 {
7807         if (atomic_dec_and_test(&mddev->writes_pending)) {
7808                 if (mddev->safemode == 2)
7809                         md_wakeup_thread(mddev->thread);
7810                 else if (mddev->safemode_delay)
7811                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
7812         }
7813 }
7814 EXPORT_SYMBOL(md_write_end);
7815
7816 /* md_allow_write(mddev)
7817  * Calling this ensures that the array is marked 'active' so that writes
7818  * may proceed without blocking.  It is important to call this before
7819  * attempting a GFP_KERNEL allocation while holding the mddev lock.
7820  * Must be called with mddev_lock held.
7821  *
7822  * In the ->external case MD_CHANGE_PENDING can not be cleared until mddev->lock
7823  * is dropped, so return -EAGAIN after notifying userspace.
7824  */
7825 int md_allow_write(struct mddev *mddev)
7826 {
7827         if (!mddev->pers)
7828                 return 0;
7829         if (mddev->ro)
7830                 return 0;
7831         if (!mddev->pers->sync_request)
7832                 return 0;
7833
7834         spin_lock(&mddev->lock);
7835         if (mddev->in_sync) {
7836                 mddev->in_sync = 0;
7837                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7838                 set_bit(MD_CHANGE_PENDING, &mddev->flags);
7839                 if (mddev->safemode_delay &&
7840                     mddev->safemode == 0)
7841                         mddev->safemode = 1;
7842                 spin_unlock(&mddev->lock);
7843                 md_update_sb(mddev, 0);
7844                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7845         } else
7846                 spin_unlock(&mddev->lock);
7847
7848         if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
7849                 return -EAGAIN;
7850         else
7851                 return 0;
7852 }
7853 EXPORT_SYMBOL_GPL(md_allow_write);
7854
7855 #define SYNC_MARKS      10
7856 #define SYNC_MARK_STEP  (3*HZ)
7857 #define UPDATE_FREQUENCY (5*60*HZ)
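/*
 * Resync speed is estimated over a sliding window of SYNC_MARKS samples
 * taken SYNC_MARK_STEP apart (10 x 3s, roughly 30s of history), while
 * UPDATE_FREQUENCY ensures curr_resync_completed (and hence the on-disk
 * checkpoint) is refreshed at least every five minutes during a long resync.
 */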
7858 void md_do_sync(struct md_thread *thread)
7859 {
7860         struct mddev *mddev = thread->mddev;
7861         struct mddev *mddev2;
7862         unsigned int currspeed = 0,
7863                  window;
7864         sector_t max_sectors,j, io_sectors, recovery_done;
7865         unsigned long mark[SYNC_MARKS];
7866         unsigned long update_time;
7867         sector_t mark_cnt[SYNC_MARKS];
7868         int last_mark,m;
7869         struct list_head *tmp;
7870         sector_t last_check;
7871         int skipped = 0;
7872         struct md_rdev *rdev;
7873         char *desc, *action = NULL;
7874         struct blk_plug plug;
7875         int ret;
7876
7877         /* just in case the thread restarts... */
7878         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7879                 return;
7880         if (mddev->ro) {/* never try to sync a read-only array */
7881                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7882                 return;
7883         }
7884
7885         if (mddev_is_clustered(mddev)) {
7886                 ret = md_cluster_ops->resync_start(mddev);
7887                 if (ret)
7888                         goto skip;
7889
7890                 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
7891                 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7892                         test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
7893                         test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
7894                      && ((unsigned long long)mddev->curr_resync_completed
7895                          < (unsigned long long)mddev->resync_max_sectors))
7896                         goto skip;
7897         }
7898
7899         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7900                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
7901                         desc = "data-check";
7902                         action = "check";
7903                 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7904                         desc = "requested-resync";
7905                         action = "repair";
7906                 } else
7907                         desc = "resync";
7908         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7909                 desc = "reshape";
7910         else
7911                 desc = "recovery";
7912
7913         mddev->last_sync_action = action ?: desc;
7914
7915         /* we overload curr_resync somewhat here.
7916          * 0 == not engaged in resync at all
7917          * 2 == checking that there is no conflict with another sync
7918          * 1 == like 2, but have yielded to allow conflicting resync to
7919          *              commence
7920          * other == active in resync - this many blocks
7921          *
7922          * Before starting a resync we must have set curr_resync to
7923          * 2, and then checked that every "conflicting" array has curr_resync
7924          * less than ours.  When we find one that is the same or higher
7925          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
7926          * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
7927          * This will mean we have to start checking from the beginning again.
7928          *
7929          */
7930
7931         do {
7932                 int mddev2_minor = -1;
7933                 mddev->curr_resync = 2;
7934
7935         try_again:
7936                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7937                         goto skip;
7938                 for_each_mddev(mddev2, tmp) {
7939                         if (mddev2 == mddev)
7940                                 continue;
7941                         if (!mddev->parallel_resync
7942                         &&  mddev2->curr_resync
7943                         &&  match_mddev_units(mddev, mddev2)) {
7944                                 DEFINE_WAIT(wq);
7945                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
7946                                         /* arbitrarily yield */
7947                                         mddev->curr_resync = 1;
7948                                         wake_up(&resync_wait);
7949                                 }
7950                                 if (mddev > mddev2 && mddev->curr_resync == 1)
7951                                         /* no need to wait here, we can wait the next
7952                                          * time 'round when curr_resync == 2
7953                                          */
7954                                         continue;
7955                                 /* We need to wait 'interruptible' so as not to
7956                                  * contribute to the load average, and not to
7957                                  * be caught by 'softlockup'
7958                                  */
7959                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
7960                                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7961                                     mddev2->curr_resync >= mddev->curr_resync) {
7962                                         if (mddev2_minor != mddev2->md_minor) {
7963                                                 mddev2_minor = mddev2->md_minor;
7964                                                 printk(KERN_INFO "md: delaying %s of %s"
7965                                                        " until %s has finished (they"
7966                                                        " share one or more physical units)\n",
7967                                                        desc, mdname(mddev),
7968                                                        mdname(mddev2));
7969                                         }
7970                                         mddev_put(mddev2);
7971                                         if (signal_pending(current))
7972                                                 flush_signals(current);
7973                                         schedule();
7974                                         finish_wait(&resync_wait, &wq);
7975                                         goto try_again;
7976                                 }
7977                                 finish_wait(&resync_wait, &wq);
7978                         }
7979                 }
7980         } while (mddev->curr_resync < 2);
7981
7982         j = 0;
7983         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7984                 /* resync follows the size requested by the personality,
7985                  * which defaults to physical size, but can be virtual size
7986                  */
7987                 max_sectors = mddev->resync_max_sectors;
7988                 atomic64_set(&mddev->resync_mismatches, 0);
7989                 /* we don't use the checkpoint if there's a bitmap */
7990                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7991                         j = mddev->resync_min;
7992                 else if (!mddev->bitmap)
7993                         j = mddev->recovery_cp;
7994
7995         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7996                 max_sectors = mddev->resync_max_sectors;
7997         else {
7998                 /* recovery follows the physical size of devices */
7999                 max_sectors = mddev->dev_sectors;
8000                 j = MaxSector;
8001                 rcu_read_lock();
8002                 rdev_for_each_rcu(rdev, mddev)
8003                         if (rdev->raid_disk >= 0 &&
8004                             !test_bit(Journal, &rdev->flags) &&
8005                             !test_bit(Faulty, &rdev->flags) &&
8006                             !test_bit(In_sync, &rdev->flags) &&
8007                             rdev->recovery_offset < j)
8008                                 j = rdev->recovery_offset;
8009                 rcu_read_unlock();
8010
8011                 /* If there is a bitmap, we need to make sure all
8012                  * writes that started before we added a spare
8013                  * complete before we start doing a recovery.
8014                  * Otherwise the write might complete and (via
8015                  * bitmap_endwrite) set a bit in the bitmap after the
8016                  * recovery has checked that bit and skipped that
8017                  * region.
8018                  */
8019                 if (mddev->bitmap) {
8020                         mddev->pers->quiesce(mddev, 1);
8021                         mddev->pers->quiesce(mddev, 0);
8022                 }
8023         }
8024
8025         printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
8026         printk(KERN_INFO "md: minimum _guaranteed_  speed:"
8027                 " %d KB/sec/disk.\n", speed_min(mddev));
8028         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
8029                "(but not more than %d KB/sec) for %s.\n",
8030                speed_max(mddev), desc);
8031
8032         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
8033
8034         io_sectors = 0;
8035         for (m = 0; m < SYNC_MARKS; m++) {
8036                 mark[m] = jiffies;
8037                 mark_cnt[m] = io_sectors;
8038         }
8039         last_mark = 0;
8040         mddev->resync_mark = mark[last_mark];
8041         mddev->resync_mark_cnt = mark_cnt[last_mark];
8042
8043         /*
8044          * Tune reconstruction:
8045          */
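        /* 32 pages expressed in 512-byte sectors: with 4 KiB pages this is
         * 256 sectors, i.e. the 128k window reported in the printk below.
         */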
8046         window = 32*(PAGE_SIZE/512);
8047         printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
8048                 window/2, (unsigned long long)max_sectors/2);
8049
8050         atomic_set(&mddev->recovery_active, 0);
8051         last_check = 0;
8052
8053         if (j>2) {
8054                 printk(KERN_INFO
8055                        "md: resuming %s of %s from checkpoint.\n",
8056                        desc, mdname(mddev));
8057                 mddev->curr_resync = j;
8058         } else
8059                 mddev->curr_resync = 3; /* no longer delayed */
8060         mddev->curr_resync_completed = j;
8061         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8062         md_new_event(mddev);
8063         update_time = jiffies;
8064
8065         blk_start_plug(&plug);
8066         while (j < max_sectors) {
8067                 sector_t sectors;
8068
8069                 skipped = 0;
8070
8071                 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8072                     ((mddev->curr_resync > mddev->curr_resync_completed &&
8073                       (mddev->curr_resync - mddev->curr_resync_completed)
8074                       > (max_sectors >> 4)) ||
8075                      time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
8076                      (j - mddev->curr_resync_completed)*2
8077                      >= mddev->resync_max - mddev->curr_resync_completed ||
8078                      mddev->curr_resync_completed > mddev->resync_max
8079                             )) {
8080                         /* time to update curr_resync_completed */
8081                         wait_event(mddev->recovery_wait,
8082                                    atomic_read(&mddev->recovery_active) == 0);
8083                         mddev->curr_resync_completed = j;
8084                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8085                             j > mddev->recovery_cp)
8086                                 mddev->recovery_cp = j;
8087                         update_time = jiffies;
8088                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
8089                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8090                 }
8091
8092                 while (j >= mddev->resync_max &&
8093                        !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8094                         /* As this condition is controlled by user-space,
8095                          * we can block indefinitely, so use '_interruptible'
8096                          * to avoid triggering warnings.
8097                          */
8098                         flush_signals(current); /* just in case */
8099                         wait_event_interruptible(mddev->recovery_wait,
8100                                                  mddev->resync_max > j
8101                                                  || test_bit(MD_RECOVERY_INTR,
8102                                                              &mddev->recovery));
8103                 }
8104
8105                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8106                         break;
8107
8108                 sectors = mddev->pers->sync_request(mddev, j, &skipped);
8109                 if (sectors == 0) {
8110                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8111                         break;
8112                 }
8113
8114                 if (!skipped) { /* actual IO requested */
8115                         io_sectors += sectors;
8116                         atomic_add(sectors, &mddev->recovery_active);
8117                 }
8118
8119                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8120                         break;
8121
8122                 j += sectors;
8123                 if (j > max_sectors)
8124                         /* when skipping, extra large numbers can be returned. */
8125                         j = max_sectors;
8126                 if (j > 2)
8127                         mddev->curr_resync = j;
8128                 mddev->curr_mark_cnt = io_sectors;
8129                 if (last_check == 0)
8130                         /* this is the earliest that rebuild will be
8131                          * visible in /proc/mdstat
8132                          */
8133                         md_new_event(mddev);
8134
8135                 if (last_check + window > io_sectors || j == max_sectors)
8136                         continue;
8137
8138                 last_check = io_sectors;
8139         repeat:
8140                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8141                         /* step marks */
8142                         int next = (last_mark+1) % SYNC_MARKS;
8143
8144                         mddev->resync_mark = mark[next];
8145                         mddev->resync_mark_cnt = mark_cnt[next];
8146                         mark[next] = jiffies;
8147                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
8148                         last_mark = next;
8149                 }
8150
8151                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8152                         break;
8153
8154                 /*
8155                  * this loop only exits if we are either slower than
8156                  * the 'hard' speed limit, or the system was IO-idle for
8157                  * a jiffy.
8158                  * the system might be non-idle CPU-wise, but we only care
8159                  * about not overloading the IO subsystem. (things like an
8160                  * e2fsck being done on the RAID array should execute fast)
8161                  */
8162                 cond_resched();
8163
8164                 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8165                 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
8166                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
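                /* currspeed is KiB/sec since the last mark: completed
                 * sectors (minus whatever is still in flight) halved to KiB
                 * and divided by the elapsed seconds; the two '+1's avoid a
                 * division by zero and a reported speed of 0.
                 */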
8167
8168                 if (currspeed > speed_min(mddev)) {
8169                         if (currspeed > speed_max(mddev)) {
8170                                 msleep(500);
8171                                 goto repeat;
8172                         }
8173                         if (!is_mddev_idle(mddev, 0)) {
8174                                 /*
8175                                  * Give other IO more of a chance.
8176                                  * The faster the devices, the less we wait.
8177                                  */
8178                                 wait_event(mddev->recovery_wait,
8179                                            !atomic_read(&mddev->recovery_active));
8180                         }
8181                 }
8182         }
8183         printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
8184                test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8185                ? "interrupted" : "done");
8186         /*
8187          * this also signals 'finished resyncing' to md_stop
8188          */
8189         blk_finish_plug(&plug);
8190         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8191
8192         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8193             !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8194             mddev->curr_resync > 3) {
8195                 mddev->curr_resync_completed = mddev->curr_resync;
8196                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8197         }
8198         mddev->pers->sync_request(mddev, max_sectors, &skipped);
8199
8200         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
8201             mddev->curr_resync > 3) {
8202                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8203                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8204                                 if (mddev->curr_resync >= mddev->recovery_cp) {
8205                                         printk(KERN_INFO
8206                                                "md: checkpointing %s of %s.\n",
8207                                                desc, mdname(mddev));
8208                                         if (test_bit(MD_RECOVERY_ERROR,
8209                                                 &mddev->recovery))
8210                                                 mddev->recovery_cp =
8211                                                         mddev->curr_resync_completed;
8212                                         else
8213                                                 mddev->recovery_cp =
8214                                                         mddev->curr_resync;
8215                                 }
8216                         } else
8217                                 mddev->recovery_cp = MaxSector;
8218                 } else {
8219                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8220                                 mddev->curr_resync = MaxSector;
8221                         rcu_read_lock();
8222                         rdev_for_each_rcu(rdev, mddev)
8223                                 if (rdev->raid_disk >= 0 &&
8224                                     mddev->delta_disks >= 0 &&
8225                                     !test_bit(Journal, &rdev->flags) &&
8226                                     !test_bit(Faulty, &rdev->flags) &&
8227                                     !test_bit(In_sync, &rdev->flags) &&
8228                                     rdev->recovery_offset < mddev->curr_resync)
8229                                         rdev->recovery_offset = mddev->curr_resync;
8230                         rcu_read_unlock();
8231                 }
8232         }
8233  skip:
8234         /* set CHANGE_PENDING here since another update may still be needed,
8235          * so that other nodes are informed. It should be harmless for normal
8236          * raid */
8237         set_mask_bits(&mddev->flags, 0,
8238                       BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
8239
8240         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8241                         !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8242                         mddev->delta_disks > 0 &&
8243                         mddev->pers->finish_reshape &&
8244                         mddev->pers->size &&
8245                         mddev->queue) {
8246                 mddev_lock_nointr(mddev);
8247                 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
8248                 mddev_unlock(mddev);
8249                 set_capacity(mddev->gendisk, mddev->array_sectors);
8250                 revalidate_disk(mddev->gendisk);
8251         }
8252
8253         spin_lock(&mddev->lock);
8254         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8255                 /* We completed so min/max setting can be forgotten if used. */
8256                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8257                         mddev->resync_min = 0;
8258                 mddev->resync_max = MaxSector;
8259         } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8260                 mddev->resync_min = mddev->curr_resync_completed;
8261         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
8262         mddev->curr_resync = 0;
8263         spin_unlock(&mddev->lock);
8264
8265         wake_up(&resync_wait);
8266         md_wakeup_thread(mddev->thread);
8267         return;
8268 }
8269 EXPORT_SYMBOL_GPL(md_do_sync);
8270
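/*
 * Two passes over the member devices: the first marks Faulty, unblocked,
 * idle devices RemoveSynchronized so a single synchronize_rcu() can cover
 * them all; the second hot-removes them and then tries to (re-)add any
 * usable spares.  Returns the number of spares available for recovery.
 */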
8271 static int remove_and_add_spares(struct mddev *mddev,
8272                                  struct md_rdev *this)
8273 {
8274         struct md_rdev *rdev;
8275         int spares = 0;
8276         int removed = 0;
8277         bool remove_some = false;
8278
8279         if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
8280                 /* Mustn't remove devices when resync thread is running */
8281                 return 0;
8282
8283         rdev_for_each(rdev, mddev) {
8284                 if ((this == NULL || rdev == this) &&
8285                     rdev->raid_disk >= 0 &&
8286                     !test_bit(Blocked, &rdev->flags) &&
8287                     test_bit(Faulty, &rdev->flags) &&
8288                     atomic_read(&rdev->nr_pending)==0) {
8289                         /* Faulty non-Blocked devices with nr_pending == 0
8290                          * never get nr_pending incremented,
8291                          * never get Faulty cleared, and never get Blocked set.
8292                          * So we can synchronize_rcu now rather than once per device
8293                          */
8294                         remove_some = true;
8295                         set_bit(RemoveSynchronized, &rdev->flags);
8296                 }
8297         }
8298
8299         if (remove_some)
8300                 synchronize_rcu();
8301         rdev_for_each(rdev, mddev) {
8302                 if ((this == NULL || rdev == this) &&
8303                     rdev->raid_disk >= 0 &&
8304                     !test_bit(Blocked, &rdev->flags) &&
8305                     ((test_bit(RemoveSynchronized, &rdev->flags) ||
8306                      (!test_bit(In_sync, &rdev->flags) &&
8307                       !test_bit(Journal, &rdev->flags))) &&
8308                     atomic_read(&rdev->nr_pending)==0)) {
8309                         if (mddev->pers->hot_remove_disk(
8310                                     mddev, rdev) == 0) {
8311                                 sysfs_unlink_rdev(mddev, rdev);
8312                                 rdev->saved_raid_disk = rdev->raid_disk;
8313                                 rdev->raid_disk = -1;
8314                                 removed++;
8315                         }
8316                 }
8317                 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
8318                         clear_bit(RemoveSynchronized, &rdev->flags);
8319         }
8320
8321         if (removed && mddev->kobj.sd)
8322                 sysfs_notify(&mddev->kobj, NULL, "degraded");
8323
8324         if (this && removed)
8325                 goto no_add;
8326
8327         rdev_for_each(rdev, mddev) {
8328                 if (this && this != rdev)
8329                         continue;
8330                 if (test_bit(Candidate, &rdev->flags))
8331                         continue;
8332                 if (rdev->raid_disk >= 0 &&
8333                     !test_bit(In_sync, &rdev->flags) &&
8334                     !test_bit(Journal, &rdev->flags) &&
8335                     !test_bit(Faulty, &rdev->flags))
8336                         spares++;
8337                 if (rdev->raid_disk >= 0)
8338                         continue;
8339                 if (test_bit(Faulty, &rdev->flags))
8340                         continue;
8341                 if (!test_bit(Journal, &rdev->flags)) {
8342                         if (mddev->ro &&
8343                             ! (rdev->saved_raid_disk >= 0 &&
8344                                !test_bit(Bitmap_sync, &rdev->flags)))
8345                                 continue;
8346
8347                         rdev->recovery_offset = 0;
8348                 }
8349                 if (mddev->pers->
8350                     hot_add_disk(mddev, rdev) == 0) {
8351                         if (sysfs_link_rdev(mddev, rdev))
8352                                 /* failure here is OK */;
8353                         if (!test_bit(Journal, &rdev->flags))
8354                                 spares++;
8355                         md_new_event(mddev);
8356                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
8357                 }
8358         }
8359 no_add:
8360         if (removed)
8361                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
8362         return spares;
8363 }
8364
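/*
 * Work item, queued on md_misc_wq by md_check_recovery(), that creates the
 * resync/recovery thread.  If the thread cannot be started, the pending
 * recovery bits are cleared again and any waiters are woken.
 */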
8365 static void md_start_sync(struct work_struct *ws)
8366 {
8367         struct mddev *mddev = container_of(ws, struct mddev, del_work);
8368
8369         mddev->sync_thread = md_register_thread(md_do_sync,
8370                                                 mddev,
8371                                                 "resync");
8372         if (!mddev->sync_thread) {
8373                 printk(KERN_ERR "%s: could not start resync thread...\n",
8374                        mdname(mddev));
8375                 /* leave the spares where they are, it shouldn't hurt */
8376                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8377                 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8378                 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8379                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8380                 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8381                 wake_up(&resync_wait);
8382                 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8383                                        &mddev->recovery))
8384                         if (mddev->sysfs_action)
8385                                 sysfs_notify_dirent_safe(mddev->sysfs_action);
8386         } else
8387                 md_wakeup_thread(mddev->sync_thread);
8388         sysfs_notify_dirent_safe(mddev->sysfs_action);
8389         md_new_event(mddev);
8390 }
8391
8392 /*
8393  * This routine is regularly called by all per-raid-array threads to
8394  * deal with generic issues like resync and super-block update.
8395  * RAID personalities that don't have a thread (linear/raid0) do not
8396  * need this as they never do any recovery or update the superblock.
8397  *
8398  * It does not do any resync itself, but rather "forks" off other threads
8399  * to do that as needed.
8400  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
8401  * "->recovery" and create a thread at ->sync_thread.
8402  * When the thread finishes it sets MD_RECOVERY_DONE
8403  * and wakes up this thread, which will reap the thread and finish up.
8404  * This thread also removes any faulty devices (with nr_pending == 0).
8405  *
8406  * The overall approach is:
8407  *  1/ if the superblock needs updating, update it.
8408  *  2/ If a recovery thread is running, don't do anything else.
8409  *  3/ If recovery has finished, clean up, possibly marking spares active.
8410  *  4/ If there are any faulty devices, remove them.
8411  *  5/ If the array is degraded, try to add spare devices.
8412  *  6/ If array has spares or is not in-sync, start a resync thread.
8413  */
8414 void md_check_recovery(struct mddev *mddev)
8415 {
8416         if (mddev->suspended)
8417                 return;
8418
8419         if (mddev->bitmap)
8420                 bitmap_daemon_work(mddev);
8421
8422         if (signal_pending(current)) {
8423                 if (mddev->pers->sync_request && !mddev->external) {
8424                         printk(KERN_INFO "md: %s in immediate safe mode\n",
8425                                mdname(mddev));
8426                         mddev->safemode = 2;
8427                 }
8428                 flush_signals(current);
8429         }
8430
8431         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
8432                 return;
8433         if ( ! (
8434                 (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
8435                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8436                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8437                 test_bit(MD_RELOAD_SB, &mddev->flags) ||
8438                 (mddev->external == 0 && mddev->safemode == 1) ||
8439                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
8440                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
8441                 ))
8442                 return;
8443
8444         if (mddev_trylock(mddev)) {
8445                 int spares = 0;
8446
8447                 if (mddev->ro) {
8448                         struct md_rdev *rdev;
8449                         if (!mddev->external && mddev->in_sync)
8450                                 /* 'Blocked' flag not needed as failed devices
8451                                  * will be recorded if the array is switched to read/write.
8452                                  * Leaving it set will prevent the device
8453                                  * from being removed.
8454                                  */
8455                                 rdev_for_each(rdev, mddev)
8456                                         clear_bit(Blocked, &rdev->flags);
8457                         /* On a read-only array we can:
8458                          * - remove failed devices
8459                          * - add already-in_sync devices if the array itself
8460                          *   is in-sync.
8461                          * As we only add devices that are already in-sync,
8462                          * we can activate the spares immediately.
8463                          */
8464                         remove_and_add_spares(mddev, NULL);
8465                         /* There is no thread, but we need to call
8466                          * ->spare_active and clear saved_raid_disk
8467                          */
8468                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8469                         md_reap_sync_thread(mddev);
8470                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8471                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8472                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
8473                         goto unlock;
8474                 }
8475
8476                 if (mddev_is_clustered(mddev)) {
8477                         struct md_rdev *rdev, *tmp;
8478                         /* kick the device if another node issued a
8479                          * remove-disk request.
8480                          */
8481                         rdev_for_each_safe(rdev, tmp, mddev) {
8482                                 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
8483                                                 rdev->raid_disk < 0)
8484                                         md_kick_rdev_from_array(rdev);
8485                         }
8486
8487                         if (test_and_clear_bit(MD_RELOAD_SB, &mddev->flags))
8488                                 md_reload_sb(mddev, mddev->good_device_nr);
8489                 }
8490
8491                 if (!mddev->external) {
8492                         int did_change = 0;
8493                         spin_lock(&mddev->lock);
8494                         if (mddev->safemode &&
8495                             !atomic_read(&mddev->writes_pending) &&
8496                             !mddev->in_sync &&
8497                             mddev->recovery_cp == MaxSector) {
8498                                 mddev->in_sync = 1;
8499                                 did_change = 1;
8500                                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
8501                         }
8502                         if (mddev->safemode == 1)
8503                                 mddev->safemode = 0;
8504                         spin_unlock(&mddev->lock);
8505                         if (did_change)
8506                                 sysfs_notify_dirent_safe(mddev->sysfs_state);
8507                 }
8508
8509                 if (mddev->flags & MD_UPDATE_SB_FLAGS)
8510                         md_update_sb(mddev, 0);
8511
8512                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
8513                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
8514                         /* resync/recovery still happening */
8515                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8516                         goto unlock;
8517                 }
8518                 if (mddev->sync_thread) {
8519                         md_reap_sync_thread(mddev);
8520                         goto unlock;
8521                 }
8522                 /* Set RUNNING before clearing NEEDED to avoid
8523                  * any transients in the value of "sync_action".
8524                  */
8525                 mddev->curr_resync_completed = 0;
8526                 spin_lock(&mddev->lock);
8527                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8528                 spin_unlock(&mddev->lock);
8529                 /* Clear some bits that don't mean anything, but
8530                  * might be left set
8531                  */
8532                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
8533                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8534
8535                 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8536                     test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
8537                         goto not_running;
8538                 /* no recovery is running.
8539                  * remove any failed drives, then
8540                  * add spares if possible.
8541                  * Spares are also removed and re-added, to allow
8542                  * the personality to fail the re-add.
8543                  */
8544
8545                 if (mddev->reshape_position != MaxSector) {
8546                         if (mddev->pers->check_reshape == NULL ||
8547                             mddev->pers->check_reshape(mddev) != 0)
8548                                 /* Cannot proceed */
8549                                 goto not_running;
8550                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8551                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8552                 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
8553                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8554                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8555                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8556                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8557                 } else if (mddev->recovery_cp < MaxSector) {
8558                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8559                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8560                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
8561                         /* nothing to be done ... */
8562                         goto not_running;
8563
8564                 if (mddev->pers->sync_request) {
8565                         if (spares) {
8566                                 /* We are adding a device or devices to an array
8567                                  * which has the bitmap stored on all devices.
8568                                  * So make sure all bitmap pages get written
8569                                  */
8570                                 bitmap_write_all(mddev->bitmap);
8571                         }
8572                         INIT_WORK(&mddev->del_work, md_start_sync);
8573                         queue_work(md_misc_wq, &mddev->del_work);
8574                         goto unlock;
8575                 }
8576         not_running:
8577                 if (!mddev->sync_thread) {
8578                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8579                         wake_up(&resync_wait);
8580                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8581                                                &mddev->recovery))
8582                                 if (mddev->sysfs_action)
8583                                         sysfs_notify_dirent_safe(mddev->sysfs_action);
8584                 }
8585         unlock:
8586                 wake_up(&mddev->sb_wait);
8587                 mddev_unlock(mddev);
8588         }
8589 }
8590 EXPORT_SYMBOL(md_check_recovery);
8591
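/*
 * Called once the resync/recovery thread has finished: unregister it,
 * activate any spares on success, complete a reshape if one was running,
 * write out the superblocks and clear the recovery state, finally setting
 * MD_RECOVERY_NEEDED so the array state is re-evaluated.
 */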
8592 void md_reap_sync_thread(struct mddev *mddev)
8593 {
8594         struct md_rdev *rdev;
8595
8596         /* resync has finished, collect result */
8597         md_unregister_thread(&mddev->sync_thread);
8598         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8599             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
8600             mddev->degraded != mddev->raid_disks) {
8601                 /* success...*/
8602                 /* activate any spares */
8603                 if (mddev->pers->spare_active(mddev)) {
8604                         sysfs_notify(&mddev->kobj, NULL,
8605                                      "degraded");
8606                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
8607                 }
8608         }
8609         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8610             mddev->pers->finish_reshape)
8611                 mddev->pers->finish_reshape(mddev);
8612
8613         /* If the array is no longer degraded, then any saved_raid_disk
8614          * information must be scrapped.
8615          */
8616         if (!mddev->degraded)
8617                 rdev_for_each(rdev, mddev)
8618                         rdev->saved_raid_disk = -1;
8619
8620         md_update_sb(mddev, 1);
8621         /* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can
8622          * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
8623          * clustered raid */
8624         if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
8625                 md_cluster_ops->resync_finish(mddev);
8626         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8627         clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8628         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8629         clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8630         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8631         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8632         wake_up(&resync_wait);
8633         /* flag recovery needed just to double check */
8634         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8635         sysfs_notify_dirent_safe(mddev->sysfs_action);
8636         md_new_event(mddev);
8637         if (mddev->event_work.func)
8638                 queue_work(md_misc_wq, &mddev->event_work);
8639 }
8640 EXPORT_SYMBOL(md_reap_sync_thread);
8641
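/*
 * Wait (for at most five seconds) until @rdev is no longer Blocked or
 * BlockedBadBlocks, then drop the nr_pending reference held by the caller.
 */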
8642 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
8643 {
8644         sysfs_notify_dirent_safe(rdev->sysfs_state);
8645         wait_event_timeout(rdev->blocked_wait,
8646                            !test_bit(Blocked, &rdev->flags) &&
8647                            !test_bit(BlockedBadBlocks, &rdev->flags),
8648                            msecs_to_jiffies(5000));
8649         rdev_dec_pending(rdev, mddev);
8650 }
8651 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
8652
8653 void md_finish_reshape(struct mddev *mddev)
8654 {
8655         /* Called by the personality module when a reshape completes. */
8656         struct md_rdev *rdev;
8657
8658         rdev_for_each(rdev, mddev) {
8659                 if (rdev->data_offset > rdev->new_data_offset)
8660                         rdev->sectors += rdev->data_offset - rdev->new_data_offset;
8661                 else
8662                         rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
8663                 rdev->data_offset = rdev->new_data_offset;
8664         }
8665 }
8666 EXPORT_SYMBOL(md_finish_reshape);
8667
8668 /* Bad block management */
8669
8670 /* Returns 1 on success, 0 on failure */
8671 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8672                        int is_new)
8673 {
8674         struct mddev *mddev = rdev->mddev;
8675         int rv;
8676         if (is_new)
8677                 s += rdev->new_data_offset;
8678         else
8679                 s += rdev->data_offset;
8680         rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
8681         if (rv == 0) {
8682                 /* Make sure they get written out promptly */
8683                 sysfs_notify_dirent_safe(rdev->sysfs_state);
8684                 set_mask_bits(&mddev->flags, 0,
8685                               BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING));
8686                 md_wakeup_thread(rdev->mddev->thread);
8687                 return 1;
8688         } else
8689                 return 0;
8690 }
8691 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
8692
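/*
 * Clear a range of previously recorded bad blocks on @rdev.  As with
 * rdev_set_badblocks(), @s is relative to the old or new data offset
 * depending on @is_new.
 */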
8693 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8694                          int is_new)
8695 {
8696         if (is_new)
8697                 s += rdev->new_data_offset;
8698         else
8699                 s += rdev->data_offset;
8700         return badblocks_clear(&rdev->badblocks,
8701                                   s, sectors);
8702 }
8703 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
8704
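/*
 * Reboot notifier: quiesce writes on every array and switch persistent
 * arrays to safe mode, then delay briefly so the devices can settle before
 * the system goes down.
 */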
8705 static int md_notify_reboot(struct notifier_block *this,
8706                             unsigned long code, void *x)
8707 {
8708         struct list_head *tmp;
8709         struct mddev *mddev;
8710         int need_delay = 0;
8711
8712         for_each_mddev(mddev, tmp) {
8713                 if (mddev_trylock(mddev)) {
8714                         if (mddev->pers)
8715                                 __md_stop_writes(mddev);
8716                         if (mddev->persistent)
8717                                 mddev->safemode = 2;
8718                         mddev_unlock(mddev);
8719                 }
8720                 need_delay = 1;
8721         }
8722         /*
8723          * Certain more exotic SCSI devices are known to lose data if
8724          * the system is rebooted too soon after the last write.  The
8725          * right place to handle this is the individual device driver,
8726          * but we still want the RAID driver to be safe ...
8727          */
8728         if (need_delay)
8729                 mdelay(1000*1);
8730
8731         return NOTIFY_DONE;
8732 }
8733
8734 static struct notifier_block md_notifier = {
8735         .notifier_call  = md_notify_reboot,
8736         .next           = NULL,
8737         .priority       = INT_MAX, /* before any real devices */
8738 };
8739
8740 static void md_geninit(void)
8741 {
8742         pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
8743
8744         proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
8745 }
8746
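/*
 * Module init: create the md workqueues, register the "md" and "mdp" block
 * majors and their device regions, the reboot notifier and the sysctl
 * table, then create /proc/mdstat via md_geninit().
 */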
8747 static int __init md_init(void)
8748 {
8749         int ret = -ENOMEM;
8750
8751         md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
8752         if (!md_wq)
8753                 goto err_wq;
8754
8755         md_misc_wq = alloc_workqueue("md_misc", 0, 0);
8756         if (!md_misc_wq)
8757                 goto err_misc_wq;
8758
8759         if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
8760                 goto err_md;
8761
8762         if ((ret = register_blkdev(0, "mdp")) < 0)
8763                 goto err_mdp;
8764         mdp_major = ret;
8765
8766         blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
8767                             md_probe, NULL, NULL);
8768         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
8769                             md_probe, NULL, NULL);
8770
8771         register_reboot_notifier(&md_notifier);
8772         raid_table_header = register_sysctl_table(raid_root_table);
8773
8774         md_geninit();
8775         return 0;
8776
8777 err_mdp:
8778         unregister_blkdev(MD_MAJOR, "md");
8779 err_md:
8780         destroy_workqueue(md_misc_wq);
8781 err_misc_wq:
8782         destroy_workqueue(md_wq);
8783 err_wq:
8784         return ret;
8785 }
8786
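/*
 * Clustered MD: compare the freshly re-read superblock of @rdev with the
 * in-memory state and apply any role changes made by another node: kick
 * Candidate devices whose add failed, activate newly added spares, mark
 * devices faulty, and finally update raid_disks and the event count.
 */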
8787 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
8788 {
8789         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
8790         struct md_rdev *rdev2, *tmp;
8791         int role, ret;
8792         char b[BDEVNAME_SIZE];
8793
8794         /* Check for change of roles in the active devices */
8795         rdev_for_each_safe(rdev2, tmp, mddev) {
8796                 if (test_bit(Faulty, &rdev2->flags))
8797                         continue;
8798
8799                 /* Check if the roles changed */
8800                 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
8801
8802                 if (test_bit(Candidate, &rdev2->flags)) {
8803                         if (role == 0xfffe) {
8804                                 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
8805                                 md_kick_rdev_from_array(rdev2);
8806                                 continue;
8807                         }
8808                         else
8809                                 clear_bit(Candidate, &rdev2->flags);
8810                 }
8811
8812                 if (role != rdev2->raid_disk) {
8813                         /* got activated */
8814                         if (rdev2->raid_disk == -1 && role != 0xffff) {
8815                                 rdev2->saved_raid_disk = role;
8816                                 ret = remove_and_add_spares(mddev, rdev2);
8817                                 pr_info("Activated spare: %s\n",
8818                                                 bdevname(rdev2->bdev,b));
8819                                 /* wake up mddev->thread here, so the array can
8820                                  * perform resync with the newly activated disk */
8821                                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8822                                 md_wakeup_thread(mddev->thread);
8823
8824                         }
8825                         /* device faulty
8826                          * We just want to do the minimum to mark the disk
8827                          * as faulty. The recovery is performed by the
8828                          * node that initiated the error.
8829                          */
8830                         if ((role == 0xfffe) || (role == 0xfffd)) {
8831                                 md_error(mddev, rdev2);
8832                                 clear_bit(Blocked, &rdev2->flags);
8833                         }
8834                 }
8835         }
8836
8837         if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
8838                 update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
8839
8840         /* Finally set the event to be up to date */
8841         mddev->events = le64_to_cpu(sb->events);
8842 }
8843
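/*
 * Re-read the superblock of @rdev from disk into a fresh page, restoring
 * the old page if the read fails.  Used when another cluster node has
 * updated the metadata.
 */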
8844 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
8845 {
8846         int err;
8847         struct page *swapout = rdev->sb_page;
8848         struct mdp_superblock_1 *sb;
8849
8850         /* Store the sb page of the rdev in the swapout temporary
8851          * variable so it can be restored if re-reading the superblock fails
8852          */
8853         rdev->sb_page = NULL;
8854         alloc_disk_sb(rdev);
8855         ClearPageUptodate(rdev->sb_page);
8856         rdev->sb_loaded = 0;
8857         err = super_types[mddev->major_version].load_super(rdev, NULL, mddev->minor_version);
8858
8859         if (err < 0) {
8860                 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
8861                                 __func__, __LINE__, rdev->desc_nr, err);
8862                 put_page(rdev->sb_page);
8863                 rdev->sb_page = swapout;
8864                 rdev->sb_loaded = 1;
8865                 return err;
8866         }
8867
8868         sb = page_address(rdev->sb_page);
8869         /* Pick up the recovery offset if the superblock has the
8870          * MD_FEATURE_RECOVERY_OFFSET feature flag set
8871          */
8872
8873         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
8874                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
8875
8876         /* The other node finished recovery; call spare_active to mark
8877          * the device In_sync and update mddev->degraded
8878          */
8879         if (rdev->recovery_offset == MaxSector &&
8880             !test_bit(In_sync, &rdev->flags) &&
8881             mddev->pers->spare_active(mddev))
8882                 sysfs_notify(&mddev->kobj, NULL, "degraded");
8883
8884         put_page(swapout);
8885         return 0;
8886 }
8887
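/*
 * Clustered MD: re-read the superblock of the device with descriptor
 * number @nr, apply any role changes it describes, then re-read all rdevs
 * so their recovery_offset values are up to date.
 */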
8888 void md_reload_sb(struct mddev *mddev, int nr)
8889 {
8890         struct md_rdev *rdev = NULL, *iter;
8891         int err;
8892
8893         /* Find the rdev */
8894         rdev_for_each_rcu(iter, mddev) {
8895                 if (iter->desc_nr == nr) {
8896                         rdev = iter;
8897                         break;
8898                 }
8899         }
8900
8901         if (!rdev) {
8902                 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
8903                 return;
8904         }
8905
8906         err = read_rdev(mddev, rdev);
8907         if (err < 0)
8908                 return;
8909
8910         check_sb_changes(mddev, rdev);
8911
8912         /* Read all rdev's to update recovery_offset */
8913         rdev_for_each_rcu(rdev, mddev)
8914                 read_rdev(mddev, rdev);
8915 }
8916 EXPORT_SYMBOL(md_reload_sb);
8917
8918 #ifndef MODULE
8919
8920 /*
8921  * Searches all registered partitions for autorun RAID arrays
8922  * at boot time.
8923  */
8924
8925 static DEFINE_MUTEX(detected_devices_mutex);
8926 static LIST_HEAD(all_detected_devices);
8927 struct detected_devices_node {
8928         struct list_head list;
8929         dev_t dev;
8930 };
8931
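/*
 * Remember a device found during boot-time partition scanning so that
 * autostart_arrays() can later try to assemble it into an array.
 */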
8932 void md_autodetect_dev(dev_t dev)
8933 {
8934         struct detected_devices_node *node_detected_dev;
8935
8936         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
8937         if (node_detected_dev) {
8938                 node_detected_dev->dev = dev;
8939                 mutex_lock(&detected_devices_mutex);
8940                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
8941                 mutex_unlock(&detected_devices_mutex);
8942         } else {
8943                 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
8944                         ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
8945         }
8946 }
8947
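/*
 * Import every device recorded by md_autodetect_dev(), add the usable ones
 * to pending_raid_disks, and let autorun_devices() assemble them into
 * arrays.
 */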
8948 static void autostart_arrays(int part)
8949 {
8950         struct md_rdev *rdev;
8951         struct detected_devices_node *node_detected_dev;
8952         dev_t dev;
8953         int i_scanned, i_passed;
8954
8955         i_scanned = 0;
8956         i_passed = 0;
8957
8958         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
8959
8960         mutex_lock(&detected_devices_mutex);
8961         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
8962                 i_scanned++;
8963                 node_detected_dev = list_entry(all_detected_devices.next,
8964                                         struct detected_devices_node, list);
8965                 list_del(&node_detected_dev->list);
8966                 dev = node_detected_dev->dev;
8967                 kfree(node_detected_dev);
8968                 mutex_unlock(&detected_devices_mutex);
8969                 rdev = md_import_device(dev, 0, 90);
8970                 mutex_lock(&detected_devices_mutex);
8971                 if (IS_ERR(rdev))
8972                         continue;
8973
8974                 if (test_bit(Faulty, &rdev->flags))
8975                         continue;
8976
8977                 set_bit(AutoDetected, &rdev->flags);
8978                 list_add(&rdev->same_set, &pending_raid_disks);
8979                 i_passed++;
8980         }
8981         mutex_unlock(&detected_devices_mutex);
8982
8983         printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
8984                                                 i_scanned, i_passed);
8985
8986         autorun_devices(part);
8987 }
8988
8989 #endif /* !MODULE */
8990
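/*
 * Module exit: unregister the block regions, majors, reboot notifier and
 * sysctl table, wake any pollers of /proc/mdstat so the module can be
 * released, then export all arrays and destroy the workqueues.
 */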
8991 static __exit void md_exit(void)
8992 {
8993         struct mddev *mddev;
8994         struct list_head *tmp;
8995         int delay = 1;
8996
8997         blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
8998         blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
8999
9000         unregister_blkdev(MD_MAJOR,"md");
9001         unregister_blkdev(mdp_major, "mdp");
9002         unregister_reboot_notifier(&md_notifier);
9003         unregister_sysctl_table(raid_table_header);
9004
9005         /* We cannot unload the module while some process is
9006          * waiting for us in select() or poll() - wake them up
9007          */
9008         md_unloading = 1;
9009         while (waitqueue_active(&md_event_waiters)) {
9010                 /* not safe to leave yet */
9011                 wake_up(&md_event_waiters);
9012                 msleep(delay);
9013                 delay += delay;
9014         }
9015         remove_proc_entry("mdstat", NULL);
9016
9017         for_each_mddev(mddev, tmp) {
9018                 export_array(mddev);
9019                 mddev->hold_active = 0;
9020         }
9021         destroy_workqueue(md_misc_wq);
9022         destroy_workqueue(md_wq);
9023 }
9024
9025 subsys_initcall(md_init);
9026 module_exit(md_exit)
9027
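/* Handlers for the "start_ro" module parameter: read and write start_readonly. */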
9028 static int get_ro(char *buffer, struct kernel_param *kp)
9029 {
9030         return sprintf(buffer, "%d", start_readonly);
9031 }
9032 static int set_ro(const char *val, struct kernel_param *kp)
9033 {
9034         return kstrtouint(val, 10, (unsigned int *)&start_readonly);
9035 }
9036
9037 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
9038 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
9039 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
9040
9041 MODULE_LICENSE("GPL");
9042 MODULE_DESCRIPTION("MD RAID framework");
9043 MODULE_ALIAS("md");
9044 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);