/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 */

#include "pblk.h"

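/*
 * Write error recovery path, run from the recovery workqueue: count the
 * sectors that failed on the device (rqd->ppa_status), pull their entries
 * back out of the write buffer and re-submit them as a new write request.
 */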
void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	int max_secs = nvm_max_phys_sects(dev);
	struct bio *bio;
	unsigned int nr_rec_secs;
	unsigned int pgs_read;
	int ret;

	nr_rec_secs = bitmap_weight((unsigned long int *)&rqd->ppa_status,
								max_secs);

	bio = bio_alloc(GFP_KERNEL, nr_rec_secs);
	if (!bio) {
		pr_err("pblk: not able to create recovery bio\n");
		return;
	}

	bio->bi_iter.bi_sector = 0;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;
	rqd->nr_ppas = nr_rec_secs;

	pgs_read = pblk_rb_read_to_bio_list(&pblk->rwb, bio, &recovery->failed,
								nr_rec_secs);
	if (pgs_read != nr_rec_secs) {
		pr_err("pblk: could not read recovery entries\n");
		goto err;
	}

	if (pblk_setup_w_rec_rq(pblk, rqd, c_ctx)) {
		pr_err("pblk: could not setup recovery request\n");
		goto err;
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_rec_secs, &pblk->recov_writes);
#endif

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		goto err;
	}

	mempool_free(recovery, pblk->rec_pool);
	return;

err:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, WRITE);
}

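/*
 * Split a partially completed write request: the first @comp entries have
 * made it to the device, so move the remaining (failed) entries into a new
 * recovery request and shrink the original context accordingly.
 */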
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
			struct pblk_rec_ctx *recovery, u64 *comp_bits,
			unsigned int comp)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	int max_secs = nvm_max_phys_sects(dev);
	struct nvm_rq *rec_rqd;
	struct pblk_c_ctx *rec_ctx;
	int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded;

	rec_rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rec_rqd)) {
		pr_err("pblk: could not create recovery req.\n");
		return -ENOMEM;
	}

	rec_ctx = nvm_rq_to_pdu(rec_rqd);

	/* Copy completion bitmap, but exclude the first X completed entries */
	bitmap_shift_right((unsigned long int *)&rec_rqd->ppa_status,
				(unsigned long int *)comp_bits,
				comp, max_secs);

	/* Save the context for the entries that need to be re-written and
	 * update current context with the completed entries.
	 */
	rec_ctx->sentry = pblk_rb_wrap_pos(&pblk->rwb, c_ctx->sentry + comp);
	if (comp >= c_ctx->nr_valid) {
		rec_ctx->nr_valid = 0;
		rec_ctx->nr_padded = nr_entries - comp;

		c_ctx->nr_padded = comp - c_ctx->nr_valid;
	} else {
		rec_ctx->nr_valid = c_ctx->nr_valid - comp;
		rec_ctx->nr_padded = c_ctx->nr_padded;

		c_ctx->nr_valid = comp;
		c_ctx->nr_padded = 0;
	}

	recovery->rqd = rec_rqd;
	recovery->pblk = pblk;

	return 0;
}

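/*
 * Return the lba list stored in the emeta buffer, or NULL if the emeta
 * CRC or magic identifier does not check out.
 */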
__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta_buf)
{
	u32 crc;

	crc = pblk_calc_emeta_crc(pblk, emeta_buf);
	if (le32_to_cpu(emeta_buf->crc) != crc)
		return NULL;

	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
		return NULL;

	return emeta_to_lbas(pblk, emeta_buf);
}

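/*
 * Rebuild the L2P entries for a line from its end metadata (emeta). Sectors
 * marked ADDR_EMPTY are accounted as invalidated instead of mapped. Returns
 * 0 on success or 1 if the emeta lba list cannot be trusted, in which case
 * the caller falls back to an OOB scan.
 */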
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	__le64 *lba_list;
	int data_start;
	int nr_data_lbas, nr_valid_lbas, nr_lbas = 0;
	int i;

	lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
	if (!lba_list)
		return 1;

	data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
	nr_data_lbas = lm->sec_per_line - lm->emeta_sec[0];
	nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

	for (i = data_start; i < nr_data_lbas && nr_lbas < nr_valid_lbas; i++) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_pblk_ppa(pblk, i, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		/* Do not update bad blocks */
		if (test_bit(pos, line->blk_bitmap))
			continue;

		if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
			spin_lock(&line->lock);
			if (test_and_set_bit(i, line->invalid_bitmap))
				WARN_ONCE(1, "pblk: rec. double invalidate:\n");
			else
				le32_add_cpu(line->vsc, -1);
			spin_unlock(&line->lock);

			continue;
		}

		pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
		nr_lbas++;
	}

	if (nr_valid_lbas != nr_lbas)
		pr_err("pblk: line %d - inconsistent lba list(%d/%d)\n",
				line->id, nr_valid_lbas, nr_lbas);

	line->left_msecs = 0;

	return 0;
}

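/* Number of usable data sectors in the line, discounting smeta, emeta and
 * bad blocks.
 */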
static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);

	return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
				nr_bb * geo->sec_per_blk;
}

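/* Pre-allocated scratch buffers shared by the OOB recovery steps */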
struct pblk_recov_alloc {
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	void *data;
	dma_addr_t dma_ppa_list;
	dma_addr_t dma_meta_list;
};

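/*
 * Re-read the line from @r_ptr up to the current write pointer and map the
 * lbas found in the OOB metadata. Called after padding, when the reads are
 * expected to succeed.
 */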
static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p, u64 r_ptr)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 r_ptr_int;
	int left_ppas;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	left_ppas = line->cur_sec - r_ptr;
	if (!left_ppas)
		return 0;

	r_ptr_int = r_ptr;

next_read_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_sync;
	rqd->private = &wait;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
		pos = pblk_dev_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			r_ptr_int += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
			pos = pblk_dev_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, r_ptr_int++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, r_ptr_int, line->id);
	}

	/* If read fails, more padding is needed */
	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		return ret;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: L2P recovery read timed out\n");
		return -EINTR;
	}
	atomic_dec(&pblk->inflight_io);
	reinit_completion(&wait);

	/* At this point, the read should not fail. If it does, it is a problem
	 * we cannot recover from here. Need FTL log.
	 */
	if (rqd->error) {
		pr_err("pblk: L2P recovery failed (%d)\n", rqd->error);
		return -EINTR;
	}

	for (i = 0; i < rqd->nr_ppas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
			continue;

		pblk_update_map(pblk, lba, rqd->ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_read_rq;

	return 0;
}

static void pblk_recov_complete(struct kref *ref)
{
	struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

	complete(&pad_rq->wait);
}

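/* Completion path for padding writes: release resources and drop the
 * reference that pblk_recov_pad_oob() took for this request.
 */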
static void pblk_end_io_recov(struct nvm_rq *rqd)
{
	struct pblk_pad_rq *pad_rq = rqd->private;
	struct pblk *pblk = pad_rq->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;

	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	bio_put(rqd->bio);
	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
	pblk_free_rqd(pblk, rqd, WRITE);

	atomic_dec(&pblk->inflight_io);
	kref_put(&pad_rq->ref, pblk_recov_complete);
}

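/*
 * Pad @left_ppas sectors of the line with empty (ADDR_EMPTY) writes so that
 * partially written flash pages become readable again. Pad requests complete
 * asynchronously via pblk_end_io_recov() and are refcounted through pad_rq.
 */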
static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
			      int left_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct pblk_pad_rq *pad_rq;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	u64 w_ptr = line->cur_sec;
	int left_line_ppas, rq_ppas, rq_len;
	int i, j;
	int ret = 0;

	spin_lock(&line->lock);
	left_line_ppas = line->left_msecs;
	spin_unlock(&line->lock);

	pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
	if (!pad_rq)
		return -ENOMEM;

	data = vzalloc(pblk->max_write_pgs * geo->sec_size);
	if (!data) {
		ret = -ENOMEM;
		goto free_rq;
	}

	pad_rq->pblk = pblk;
	init_completion(&pad_rq->wait);
	kref_init(&pad_rq->ref);

next_pad_rq:
	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (rq_ppas < pblk->min_write_pgs) {
		pr_err("pblk: corrupted pad line %d\n", line->id);
		ret = -EINVAL;
		goto fail_free_pad;
	}

	rq_len = rq_ppas * geo->sec_size;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list) {
		ret = -ENOMEM;
		goto fail_free_pad;
	}

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

	rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rqd)) {
		ret = PTR_ERR(rqd);
		goto fail_free_meta;
	}

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PWRITE;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_recov;
	rqd->private = pad_rq;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
			struct ppa_addr dev_ppa;
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

			pblk_map_invalidate(pblk, dev_ppa);
			lba_list[w_ptr] = meta_list[i].lba = addr_empty;
			rqd->ppa_list[i] = dev_ppa;
		}
	}

	kref_get(&pad_rq->ref);
	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
		goto fail_free_bio;
	}

	left_line_ppas -= rq_ppas;
	left_ppas -= rq_ppas;
	if (left_ppas && left_line_ppas)
		goto next_pad_rq;

	kref_put(&pad_rq->ref, pblk_recov_complete);

	if (!wait_for_completion_io_timeout(&pad_rq->wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: pad write timed out\n");
		ret = -ETIME;
	}

	if (!pblk_line_is_full(line))
		pr_err("pblk: corrupted padded line: %d\n", line->id);

	vfree(data);
free_rq:
	kfree(pad_rq);
	return ret;

fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, WRITE);
fail_free_meta:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
fail_free_pad:
	kfree(pad_rq);
	vfree(data);
	return ret;
}

/* When this function is called, it means that not all upper pages have been
 * written in a page that contains valid data. In order to recover this data,
 * we first find the write pointer on the device, then we pad all necessary
 * sectors, and finally attempt to read the valid data.
 */
static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_recov_alloc p)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 w_ptr = 0, r_ptr;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	int rec_round;
	int left_ppas = pblk_calc_sec_in_line(pblk, line) - line->cur_sec;
	DECLARE_COMPLETION_ONSTACK(wait);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	/* we could recover up until the line write pointer */
	r_ptr = line->cur_sec;
	rec_round = 0;

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_sync;
	rqd->private = &wait;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
		pos = pblk_dev_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
			pos = pblk_dev_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, w_ptr, line->id);
	}

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		return ret;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: L2P recovery read timed out\n");
	}
	atomic_dec(&pblk->inflight_io);
	reinit_completion(&wait);

	/* This should not happen since the read failed during normal recovery,
	 * but the media works funny sometimes...
	 */
	if (!rec_round++ && !rqd->error) {
		rec_round = 0;
		for (i = 0; i < rqd->nr_ppas; i++, r_ptr++) {
			u64 lba = le64_to_cpu(meta_list[i].lba);

			if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
				continue;

			pblk_update_map(pblk, lba, rqd->ppa_list[i]);
		}
	}

	/* Reached the end of the written line */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		int pad_secs, nr_error_bits, bit;
		int ret;

		bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
		nr_error_bits = rqd->nr_ppas - bit;

		/* Roll back failed sectors */
		line->cur_sec -= nr_error_bits;
		line->left_msecs += nr_error_bits;
		bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);

		pad_secs = pblk_pad_distance(pblk);
		if (pad_secs > line->left_msecs)
			pad_secs = line->left_msecs;

		ret = pblk_recov_pad_oob(pblk, line, pad_secs);
		if (ret)
			pr_err("pblk: OOB padding failed (err:%d)\n", ret);

		ret = pblk_recov_read_oob(pblk, line, p, r_ptr);
		if (ret)
			pr_err("pblk: OOB read failed (err:%d)\n", ret);

		left_ppas = 0;
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

	return ret;
}

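/*
 * First recovery pass: read the line sequentially and map the lbas found in
 * the OOB metadata. On a read error the failed sectors are rolled back; if
 * the error is not an empty-page response, *done is cleared so that the
 * caller runs the full scan in pblk_recov_scan_all_oob().
 */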
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p, int *done)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 paddr;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	int left_ppas = pblk_calc_sec_in_line(pblk, line);
	DECLARE_COMPLETION_ONSTACK(wait);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	*done = 1;

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_sync;
	rqd->private = &wait;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, paddr, line->id);
		pos = pblk_dev_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			paddr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, paddr, line->id);
			pos = pblk_dev_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, paddr++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, paddr, line->id);
	}

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		bio_put(bio);
		return ret;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: L2P recovery read timed out\n");
	}
	atomic_dec(&pblk->inflight_io);
	reinit_completion(&wait);

	/* Reached the end of the written line */
	if (rqd->error) {
		int nr_error_bits, bit;

		bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
		nr_error_bits = rqd->nr_ppas - bit;

		/* Roll back failed sectors */
		line->cur_sec -= nr_error_bits;
		line->left_msecs += nr_error_bits;
		bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);

		left_ppas = 0;
		rqd->nr_ppas = bit;

		if (rqd->error != NVM_RSP_ERR_EMPTYPAGE)
			*done = 0;
	}

	for (i = 0; i < rqd->nr_ppas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
			continue;

		pblk_update_map(pblk, lba, rqd->ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

	return ret;
}

/* Scan the line for lbas stored in the out-of-band (OOB) metadata area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_rq *rqd;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct pblk_recov_alloc p;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int done, ret = 0;

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd))
		return PTR_ERR(rqd);

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list) {
		ret = -ENOMEM;
		goto free_rqd;
	}

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

	data = kcalloc(pblk->max_write_pgs, geo->sec_size, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto free_meta_list;
	}

	p.ppa_list = ppa_list;
	p.meta_list = meta_list;
	p.rqd = rqd;
	p.data = data;
	p.dma_ppa_list = dma_ppa_list;
	p.dma_meta_list = dma_meta_list;

	ret = pblk_recov_scan_oob(pblk, line, p, &done);
	if (ret) {
		pr_err("pblk: could not recover L2P from OOB\n");
		goto out;
	}

	if (!done) {
		ret = pblk_recov_scan_all_oob(pblk, line, p);
		if (ret) {
			pr_err("pblk: could not recover L2P from OOB\n");
			goto out;
		}
	}

	if (pblk_line_is_full(line))
		pblk_line_recov_close(pblk, line);

out:
	kfree(data);
free_meta_list:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
free_rqd:
	pblk_free_rqd(pblk, rqd, READ);

	return ret;
}

/* Insert a line into the list, ordered by its sequence number (seq_nr) */
static void pblk_recov_line_add_ordered(struct list_head *head,
					struct pblk_line *line)
{
	struct pblk_line *t = NULL;

	list_for_each_entry(t, head, list)
		if (t->seq_nr > line->seq_nr)
			break;

	__list_add(&line->list, t->list.prev, &t->list);
}

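/*
 * Scan-based L2P recovery, used while there is no FTL snapshot: read smeta
 * from every line to find valid written lines, order them by sequence
 * number, then rebuild the mapping table per line from emeta, falling back
 * to an OOB scan when emeta cannot be read or trusted. Returns the still
 * open data line to resume writes on, NULL if all lines were closed, or an
 * ERR_PTR on an incompatible line version.
 */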
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line, *tline, *data_line = NULL;
	struct pblk_smeta *smeta;
	struct pblk_emeta *emeta;
	struct line_smeta *smeta_buf;
	int found_lines = 0, recovered_lines = 0, open_lines = 0;
	int is_next = 0;
	int meta_line;
	int i, valid_uuid = 0;
	LIST_HEAD(recov_list);

	/* TODO: Implement FTL snapshot */

	/* Scan recovery - takes place when FTL snapshot fails */
	spin_lock(&l_mg->free_lock);
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	set_bit(meta_line, &l_mg->meta_bitmap);
	smeta = l_mg->sline_meta[meta_line];
	emeta = l_mg->eline_meta[meta_line];
	smeta_buf = (struct line_smeta *)smeta;
	spin_unlock(&l_mg->free_lock);

	/* Order data lines using their sequence number */
	for (i = 0; i < l_mg->nr_lines; i++) {
		u32 crc;

		line = &pblk->lines[i];

		memset(smeta, 0, lm->smeta_len);
		line->smeta = smeta;
		line->lun_bitmap = ((void *)(smeta_buf)) +
						sizeof(struct line_smeta);

		/* Lines that cannot be read are assumed as not written here */
		if (pblk_line_read_smeta(pblk, line))
			continue;

		crc = pblk_calc_smeta_crc(pblk, smeta_buf);
		if (le32_to_cpu(smeta_buf->crc) != crc)
			continue;

		if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
			continue;

		if (le16_to_cpu(smeta_buf->header.version) != 1) {
			pr_err("pblk: found incompatible line version %u\n",
					le16_to_cpu(smeta_buf->header.version));
			return ERR_PTR(-EINVAL);
		}

		/* The first valid instance uuid is used for initialization */
		if (!valid_uuid) {
			memcpy(pblk->instance_uuid, smeta_buf->header.uuid, 16);
			valid_uuid = 1;
		}

		if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 16)) {
			pr_debug("pblk: ignore line %u due to uuid mismatch\n",
					i);
			continue;
		}

		/* Update line metadata */
		spin_lock(&line->lock);
		line->id = le32_to_cpu(smeta_buf->header.id);
		line->type = le16_to_cpu(smeta_buf->header.type);
		line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
		spin_unlock(&line->lock);

		/* Update general metadata */
		spin_lock(&l_mg->free_lock);
		if (line->seq_nr >= l_mg->d_seq_nr)
			l_mg->d_seq_nr = line->seq_nr + 1;
		l_mg->nr_free_lines--;
		spin_unlock(&l_mg->free_lock);

		if (pblk_line_recov_alloc(pblk, line))
			goto out;

		pblk_recov_line_add_ordered(&recov_list, line);
		found_lines++;
		pr_debug("pblk: recovering data line %d, seq:%llu\n",
				line->id, le64_to_cpu(smeta_buf->seq_nr));
	}

	if (!found_lines) {
		pblk_setup_uuid(pblk);

		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);

		goto out;
	}

	/* Verify closed blocks and recover this portion of the L2P table */
	list_for_each_entry_safe(line, tline, &recov_list, list) {
		int off, nr_bb;

		recovered_lines++;
		/* Calculate where emeta starts based on the line bb */
		off = lm->sec_per_line - lm->emeta_sec[0];
		nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
		off -= nr_bb * geo->sec_per_pl;

		line->emeta_ssec = off;
		line->emeta = emeta;
		memset(line->emeta->buf, 0, lm->emeta_len[0]);

		if (pblk_line_read_emeta(pblk, line, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_recov_l2p_from_emeta(pblk, line))
			pblk_recov_l2p_from_oob(pblk, line);

next:
		if (pblk_line_is_full(line)) {
			struct list_head *move_list;

			spin_lock(&line->lock);
			line->state = PBLK_LINESTATE_CLOSED;
			move_list = pblk_line_gc_list(pblk, line);
			spin_unlock(&line->lock);

			spin_lock(&l_mg->gc_lock);
			list_move_tail(&line->list, move_list);
			spin_unlock(&l_mg->gc_lock);

			mempool_free(line->map_bitmap, pblk->line_meta_pool);
			line->map_bitmap = NULL;
			line->smeta = NULL;
			line->emeta = NULL;
		} else {
			if (open_lines > 1)
				pr_err("pblk: failed to recover L2P\n");

			open_lines++;
			line->meta_line = meta_line;
			data_line = line;
		}
	}

	if (!open_lines) {
		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);
		pblk_line_replace_data(pblk);
	} else {
		spin_lock(&l_mg->free_lock);
		/* Allocate next line for preparation */
		l_mg->data_next = pblk_line_get(pblk);
		if (l_mg->data_next) {
			l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
			l_mg->data_next->type = PBLK_LINETYPE_DATA;
			is_next = 1;
		}
		spin_unlock(&l_mg->free_lock);
	}

	if (is_next) {
		pblk_line_erase(pblk, l_mg->data_next);
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
	}

out:
	if (found_lines != recovered_lines)
		pr_err("pblk: failed to recover all found lines %d/%d\n",
						found_lines, recovered_lines);

	return data_line;
}

/*
 * Pad the current data line out to its end so that it can be closed
 * cleanly on tear down.
 */
int pblk_recov_pad(struct pblk *pblk)
{
	struct pblk_line *line;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int left_msecs;
	int ret = 0;

	spin_lock(&l_mg->free_lock);
	line = l_mg->data_line;
	left_msecs = line->left_msecs;
	spin_unlock(&l_mg->free_lock);

	ret = pblk_recov_pad_oob(pblk, line, left_msecs);
	if (ret) {
		pr_err("pblk: Tear down padding failed (%d)\n", ret);
		return ret;
	}

	pblk_line_close_meta(pblk, line);
	return ret;
}