// SPDX-License-Identifier: GPL-2.0-only
/*
 * MTD Oops/Panic logger
 *
 * Copyright © 2007 Nokia Corporation. All rights reserved.
 *
 * Author: Richard Purdie <rpurdie@openedhand.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/timekeeping.h>
#include <linux/mtd/mtd.h>
#include <linux/kmsg_dump.h>

/* Maximum MTD partition size */
#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)
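
/*
 * For scale: with the default 4 KiB record_size, an 8 MiB partition
 * holds at most 8 MiB / 4 KiB = 2048 records, which also bounds the
 * oops_page_used bitmap allocated in mtdoops_notify_add() below.
 */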

static unsigned long record_size = 4096;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
                "record size for MTD OOPS pages in bytes (default 4096)");

static char mtddev[80];
module_param_string(mtddev, mtddev, 80, 0400);
MODULE_PARM_DESC(mtddev,
                "name or index number of the MTD device to use");

static int dump_oops = 1;
module_param(dump_oops, int, 0600);
MODULE_PARM_DESC(dump_oops,
                "set to 1 to dump oopses, 0 to only dump panics (default 1)");
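
/*
 * Example usage (the device naming is hypothetical): point mtdoops at a
 * dedicated MTD partition either on the kernel command line, e.g. when
 * built in:
 *
 *     mtdoops.mtddev=oops mtdoops.record_size=8192
 *
 * or when loaded as a module:
 *
 *     modprobe mtdoops mtddev=4 record_size=8192
 *
 * mtddev accepts a partition name or index; record_size must be a
 * multiple of 4096 (enforced in mtdoops_init()).
 */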

#define MTDOOPS_KERNMSG_MAGIC_v1 0x5d005d00  /* Original */
#define MTDOOPS_KERNMSG_MAGIC_v2 0x5d005e00  /* Adds the timestamp */
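
/*
 * find_next_position() accepts both magics, so v1 records (which lack
 * the timestamp) written before an upgrade remain recognizable.
 */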

struct mtdoops_hdr {
        u32 seq;
        u32 magic;
        ktime_t timestamp;
} __packed;
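
/*
 * On-flash layout of one record: this header followed by the kmsg dump
 * text, padded out to record_size with 0xff. An erased slot reads back
 * as all 0xff, which is how find_next_position() distinguishes used
 * pages from unused ones (seq == magic == 0xffffffff).
 */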

static struct mtdoops_context {
        struct kmsg_dumper dump;

        int mtd_index;
        struct work_struct work_erase;
        struct work_struct work_write;
        struct mtd_info *mtd;
        int oops_pages;                 /* number of record slots on the device */
        int nextpage;                   /* slot the next record goes to */
        int nextcount;                  /* sequence number of the next record */
        unsigned long *oops_page_used;  /* bitmap, one bit per slot */

        unsigned long oops_buf_busy;    /* bit 0 guards oops_buf */
        void *oops_buf;                 /* staging buffer for one record */
} oops_cxt;

static void mark_page_used(struct mtdoops_context *cxt, int page)
{
        set_bit(page, cxt->oops_page_used);
}

static void mark_page_unused(struct mtdoops_context *cxt, int page)
{
        clear_bit(page, cxt->oops_page_used);
}

static int page_is_used(struct mtdoops_context *cxt, int page)
{
        return test_bit(page, cxt->oops_page_used);
}

static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
{
        struct mtd_info *mtd = cxt->mtd;
        u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
        u32 start_page = start_page_offset / record_size;
        u32 erase_pages = mtd->erasesize / record_size;
        struct erase_info erase;
        int ret;
        int page;

        erase.addr = offset;
        erase.len = mtd->erasesize;

        ret = mtd_erase(mtd, &erase);
        if (ret) {
                printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
                       (unsigned long long)erase.addr,
                       (unsigned long long)(erase.addr + erase.len - 1), mtddev);
                return ret;
        }

        /* Mark pages as unused */
        for (page = start_page; page < start_page + erase_pages; page++)
                mark_page_unused(cxt, page);

        return 0;
}
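
/*
 * Illustrative geometry (values are examples, not requirements): with
 * 128 KiB eraseblocks and the default 4 KiB record_size, one call to
 * mtdoops_erase_block() frees 128 KiB / 4 KiB = 32 record slots.
 */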

static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
        cxt->nextpage++;
        if (cxt->nextpage >= cxt->oops_pages)
                cxt->nextpage = 0;
        cxt->nextcount++;
        if (cxt->nextcount == 0xffffffff)
                cxt->nextcount = 0;

        if (page_is_used(cxt, cxt->nextpage)) {
                schedule_work(&cxt->work_erase);
                return;
        }

        printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
               cxt->nextpage, cxt->nextcount);
}
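
/*
 * Note that nextcount wraps to 0 before ever reaching 0xffffffff: an
 * all-ones header is indistinguishable from erased flash, so that value
 * is reserved to mean "unused slot" (see find_next_position()).
 */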

/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
        struct mtdoops_context *cxt =
                        container_of(work, struct mtdoops_context, work_erase);
        struct mtd_info *mtd = cxt->mtd;
        int i = 0, j, ret, mod;

        /* We were unregistered */
        if (!mtd)
                return;

        mod = (cxt->nextpage * record_size) % mtd->erasesize;
        if (mod != 0) {
                cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
                if (cxt->nextpage >= cxt->oops_pages)
                        cxt->nextpage = 0;
        }

        while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
badblock:
                printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
                       cxt->nextpage * record_size);
                i++;
                cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
                if (cxt->nextpage >= cxt->oops_pages)
                        cxt->nextpage = 0;
                if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
                        printk(KERN_ERR "mtdoops: all blocks bad!\n");
                        return;
                }
        }

        if (ret < 0) {
                printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n");
                return;
        }

        for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
                ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);

        if (ret >= 0) {
                printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
                       cxt->nextpage, cxt->nextcount);
                return;
        }

        if (ret == -EIO) {
                ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
                if (ret < 0 && ret != -EOPNOTSUPP) {
                        printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
                        return;
                }
        }
        goto badblock;
}

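/*
 * Write one record from oops_buf to flash. In the panic case this runs
 * with the rest of the system dead, so it goes through mtd_panic_write(),
 * which must not sleep or take locks; drivers lacking ->_panic_write
 * return -EOPNOTSUPP and the record is dropped.
 */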
static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
        struct mtd_info *mtd = cxt->mtd;
        size_t retlen;
        struct mtdoops_hdr *hdr;
        int ret;

        if (test_and_set_bit(0, &cxt->oops_buf_busy))
                return;

        /* Add mtdoops header to the buffer */
        hdr = (struct mtdoops_hdr *)cxt->oops_buf;
        hdr->seq = cxt->nextcount;
        hdr->magic = MTDOOPS_KERNMSG_MAGIC_v2;
        hdr->timestamp = ktime_get_real();

        if (panic) {
                ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
                                      record_size, &retlen, cxt->oops_buf);
                if (ret == -EOPNOTSUPP) {
                        printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
                        goto out;
                }
        } else
                ret = mtd_write(mtd, cxt->nextpage * record_size,
                                record_size, &retlen, cxt->oops_buf);

        if (retlen != record_size || ret < 0)
                printk(KERN_ERR "mtdoops: write failure at %ld (%zu of %ld written), error %d\n",
                       cxt->nextpage * record_size, retlen, record_size, ret);
        mark_page_used(cxt, cxt->nextpage);
        memset(cxt->oops_buf, 0xff, record_size);

        mtdoops_inc_counter(cxt);
out:
        clear_bit(0, &cxt->oops_buf_busy);
}

static void mtdoops_workfunc_write(struct work_struct *work)
{
        struct mtdoops_context *cxt =
                        container_of(work, struct mtdoops_context, work_write);

        mtdoops_write(cxt, 0);
}

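/*
 * Scan every slot and resume after the newest record from the previous
 * boot. Sequence numbers wrap at 32 bits, so a plain "seq > maxcount"
 * test is not enough: the quadrant comparisons below treat a small seq
 * as newer than a very large one. For example, a slot with seq 3 beats
 * a slot with seq 0xfffffffe, because seq < 0x40000000 while
 * maxcount > 0xc0000000 indicates a recent wrap.
 */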
static void find_next_position(struct mtdoops_context *cxt)
{
        struct mtd_info *mtd = cxt->mtd;
        struct mtdoops_hdr hdr;
        int ret, page, maxpos = 0;
        u32 maxcount = 0xffffffff;
        size_t retlen;

        for (page = 0; page < cxt->oops_pages; page++) {
                if (mtd_block_isbad(mtd, page * record_size))
                        continue;
                /* Assume the page is used */
                mark_page_used(cxt, page);
                ret = mtd_read(mtd, page * record_size, sizeof(hdr),
                               &retlen, (u_char *)&hdr);
                if (retlen != sizeof(hdr) ||
                                (ret < 0 && !mtd_is_bitflip(ret))) {
                        printk(KERN_ERR "mtdoops: read failure at %ld (%zu of %zu read), err %d\n",
                               page * record_size, retlen, sizeof(hdr), ret);
                        continue;
                }

                if (hdr.seq == 0xffffffff && hdr.magic == 0xffffffff)
                        mark_page_unused(cxt, page);
                if (hdr.seq == 0xffffffff ||
                    (hdr.magic != MTDOOPS_KERNMSG_MAGIC_v1 &&
                     hdr.magic != MTDOOPS_KERNMSG_MAGIC_v2))
                        continue;
                if (maxcount == 0xffffffff) {
                        maxcount = hdr.seq;
                        maxpos = page;
                } else if (hdr.seq < 0x40000000 && maxcount > 0xc0000000) {
                        maxcount = hdr.seq;
                        maxpos = page;
                } else if (hdr.seq > maxcount && hdr.seq < 0xc0000000) {
                        maxcount = hdr.seq;
                        maxpos = page;
                } else if (hdr.seq > maxcount && hdr.seq > 0xc0000000
                                        && maxcount > 0x80000000) {
                        maxcount = hdr.seq;
                        maxpos = page;
                }
        }
        if (maxcount == 0xffffffff) {
                cxt->nextpage = cxt->oops_pages - 1;
                cxt->nextcount = 0;
        } else {
                cxt->nextpage = maxpos;
                cxt->nextcount = maxcount;
        }

        mtdoops_inc_counter(cxt);
}

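/*
 * kmsg_dump callback. The dump text is staged behind the header already
 * sitting in oops_buf. Panics are written out synchronously because the
 * workqueue will never get to run again; everything else is deferred to
 * work_write so the flash write happens in process context.
 */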
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
                            enum kmsg_dump_reason reason)
{
        struct mtdoops_context *cxt = container_of(dumper,
                        struct mtdoops_context, dump);
        struct kmsg_dump_iter iter;

        /* Only dump oopses if dump_oops is set */
        if (reason == KMSG_DUMP_OOPS && !dump_oops)
                return;

        kmsg_dump_rewind(&iter);

        if (test_and_set_bit(0, &cxt->oops_buf_busy))
                return;
        kmsg_dump_get_buffer(&iter, true,
                             cxt->oops_buf + sizeof(struct mtdoops_hdr),
                             record_size - sizeof(struct mtdoops_hdr), NULL);
        clear_bit(0, &cxt->oops_buf_busy);

        if (reason != KMSG_DUMP_OOPS) {
                /* Panics must be written immediately */
                mtdoops_write(cxt, 1);
        } else {
                /* For other cases, schedule work to write it "nicely" */
                schedule_work(&cxt->work_write);
        }
}

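/*
 * MTD notifier callbacks. register_mtd_user() in mtdoops_init() also
 * replays ->add() for every MTD device already registered, so the
 * attachment below does not depend on probe order.
 */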
static void mtdoops_notify_add(struct mtd_info *mtd)
{
        struct mtdoops_context *cxt = &oops_cxt;
        u64 mtdoops_pages = div_u64(mtd->size, record_size);
        int err;

        if (!strcmp(mtd->name, mtddev))
                cxt->mtd_index = mtd->index;

        if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
                return;

        if (mtd->size < mtd->erasesize * 2) {
                printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
                       mtd->index);
                return;
        }
        if (mtd->erasesize < record_size) {
                printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
                       mtd->index);
                return;
        }
        if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
                printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
                       mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
                return;
        }
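        /*
         * Summarizing the checks above: the partition must span at least
         * two eraseblocks, each eraseblock must hold at least one record,
         * and the whole partition must not exceed MTDOOPS_MAX_MTD_SIZE.
         */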

        /* oops_page_used is a bit field */
        cxt->oops_page_used =
                vmalloc(array_size(sizeof(unsigned long),
                                   DIV_ROUND_UP(mtdoops_pages,
                                                BITS_PER_LONG)));
        if (!cxt->oops_page_used) {
                printk(KERN_ERR "mtdoops: could not allocate page array\n");
                return;
        }

        cxt->dump.max_reason = KMSG_DUMP_OOPS;
        cxt->dump.dump = mtdoops_do_dump;
        err = kmsg_dump_register(&cxt->dump);
        if (err) {
                printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
                vfree(cxt->oops_page_used);
                cxt->oops_page_used = NULL;
                return;
        }

        cxt->mtd = mtd;
        cxt->oops_pages = (int)mtd->size / record_size;
        find_next_position(cxt);
        printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}

static void mtdoops_notify_remove(struct mtd_info *mtd)
{
        struct mtdoops_context *cxt = &oops_cxt;

        if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
                return;

        if (kmsg_dump_unregister(&cxt->dump) < 0)
                printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");

        cxt->mtd = NULL;
        flush_work(&cxt->work_erase);
        flush_work(&cxt->work_write);
}

static struct mtd_notifier mtdoops_notifier = {
        .add    = mtdoops_notify_add,
        .remove = mtdoops_notify_remove,
};

static int __init mtdoops_init(void)
{
        struct mtdoops_context *cxt = &oops_cxt;
        int mtd_index;
        char *endp;

        if (strlen(mtddev) == 0) {
                printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
                return -EINVAL;
        }
        if ((record_size & 4095) != 0) {
                printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
                return -EINVAL;
        }
        if (record_size < 4096) {
                printk(KERN_ERR "mtdoops: record_size must be at least 4096 bytes\n");
                return -EINVAL;
        }

        /* Setup the MTD device to use */
        cxt->mtd_index = -1;
        mtd_index = simple_strtoul(mtddev, &endp, 0);
        if (*endp == '\0')
                cxt->mtd_index = mtd_index;

        cxt->oops_buf = vmalloc(record_size);
        if (!cxt->oops_buf)
                return -ENOMEM;
        memset(cxt->oops_buf, 0xff, record_size);
        cxt->oops_buf_busy = 0;

        INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
        INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);

        /* ->add() is also replayed for MTD devices registered before us */
        register_mtd_user(&mtdoops_notifier);
        return 0;
}

static void __exit mtdoops_exit(void)
{
        struct mtdoops_context *cxt = &oops_cxt;

        unregister_mtd_user(&mtdoops_notifier);
        vfree(cxt->oops_buf);
        vfree(cxt->oops_page_used);
}

module_init(mtdoops_init);
module_exit(mtdoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");