GNU Linux-libre 4.14.290-gnu1 — drivers/md/dm-stats.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm-core.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"

static int dm_stat_need_rcu_barrier;

/*
 * Using 64-bit values to avoid overflow (which is a
 * problem that block/genhd.c's IO accounting has).
 */
struct dm_stat_percpu {
        unsigned long long sectors[2];
        unsigned long long ios[2];
        unsigned long long merges[2];
        unsigned long long ticks[2];
        unsigned long long io_ticks[2];
        unsigned long long io_ticks_total;
        unsigned long long time_in_queue;
        unsigned long long *histogram;
};

struct dm_stat_shared {
        atomic_t in_flight[2];
        unsigned long long stamp;
        struct dm_stat_percpu tmp;
};

struct dm_stat {
        struct list_head list_entry;
        int id;
        unsigned stat_flags;
        size_t n_entries;
        sector_t start;
        sector_t end;
        sector_t step;
        unsigned n_histogram_entries;
        unsigned long long *histogram_boundaries;
        const char *program_id;
        const char *aux_data;
        struct rcu_head rcu_head;
        size_t shared_alloc_size;
        size_t percpu_alloc_size;
        size_t histogram_alloc_size;
        struct dm_stat_percpu *stat_percpu[NR_CPUS];
        struct dm_stat_shared stat_shared[0];
};
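
/*
 * Layout note: a dm_stat describes one statistics region.  The region is
 * divided into n_entries areas of "step" sectors each; stat_shared is a
 * flexible array with one dm_stat_shared per area (in-flight counts plus a
 * scratch dm_stat_percpu used while reporting), and stat_percpu[cpu] points
 * to a per-CPU array of dm_stat_percpu, again one element per area, so the
 * hot counters can be updated without cross-CPU contention.
 */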

#define STAT_PRECISE_TIMESTAMPS         1

struct dm_stats_last_position {
        sector_t last_sector;
        unsigned last_rw;
};

/*
 * A typo on the command line could possibly make the kernel run out of memory
 * and crash. To prevent the crash we account for all memory used by
 * statistics. We fail if we exhaust 1/4 of all memory or 1/2 of vmalloc space.
 */
#define DM_STATS_MEMORY_FACTOR          4
#define DM_STATS_VMALLOC_FACTOR         2
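
/*
 * Illustrative example (numbers assumed, not from the code): with 8 GiB of
 * RAM and 4 KiB pages, totalram_pages is about 2M pages, so allocations are
 * refused once the tracked total would exceed roughly 2 GiB (1/4 of RAM).
 * On a 32-bit system with a 128 MiB vmalloc area the second check instead
 * caps the total at about 64 MiB.
 */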

static DEFINE_SPINLOCK(shared_memory_lock);

static unsigned long shared_memory_amount;

static bool __check_shared_memory(size_t alloc_size)
{
        size_t a;

        a = shared_memory_amount + alloc_size;
        if (a < shared_memory_amount)
                return false;
        if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
                return false;
#ifdef CONFIG_MMU
        if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
                return false;
#endif
        return true;
}

static bool check_shared_memory(size_t alloc_size)
{
        bool ret;

        spin_lock_irq(&shared_memory_lock);

        ret = __check_shared_memory(alloc_size);

        spin_unlock_irq(&shared_memory_lock);

        return ret;
}

static bool claim_shared_memory(size_t alloc_size)
{
        spin_lock_irq(&shared_memory_lock);

        if (!__check_shared_memory(alloc_size)) {
                spin_unlock_irq(&shared_memory_lock);
                return false;
        }

        shared_memory_amount += alloc_size;

        spin_unlock_irq(&shared_memory_lock);

        return true;
}

static void free_shared_memory(size_t alloc_size)
{
        unsigned long flags;

        spin_lock_irqsave(&shared_memory_lock, flags);

        if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
                spin_unlock_irqrestore(&shared_memory_lock, flags);
                DMCRIT("Memory usage accounting bug.");
                return;
        }

        shared_memory_amount -= alloc_size;

        spin_unlock_irqrestore(&shared_memory_lock, flags);
}

static void *dm_kvzalloc(size_t alloc_size, int node)
{
        void *p;

        if (!claim_shared_memory(alloc_size))
                return NULL;

        p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
        if (p)
                return p;

        free_shared_memory(alloc_size);

        return NULL;
}

static void dm_kvfree(void *ptr, size_t alloc_size)
{
        if (!ptr)
                return;

        free_shared_memory(alloc_size);

        kvfree(ptr);
}
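
/*
 * All statistics memory is obtained through dm_kvzalloc() and released with
 * dm_kvfree(), which keep shared_memory_amount balanced; the current total
 * is exported read-only through the stats_current_allocated_bytes module
 * parameter defined at the end of this file.
 */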

static void dm_stat_free(struct rcu_head *head)
{
        int cpu;
        struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

        kfree(s->histogram_boundaries);
        kfree(s->program_id);
        kfree(s->aux_data);
        for_each_possible_cpu(cpu) {
                dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
                dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
        }
        dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size);
        dm_kvfree(s, s->shared_alloc_size);
}

static int dm_stat_in_flight(struct dm_stat_shared *shared)
{
        return atomic_read(&shared->in_flight[READ]) +
               atomic_read(&shared->in_flight[WRITE]);
}

void dm_stats_init(struct dm_stats *stats)
{
        int cpu;
        struct dm_stats_last_position *last;

        mutex_init(&stats->mutex);
        INIT_LIST_HEAD(&stats->list);
        stats->last = alloc_percpu(struct dm_stats_last_position);
        for_each_possible_cpu(cpu) {
                last = per_cpu_ptr(stats->last, cpu);
                last->last_sector = (sector_t)ULLONG_MAX;
                last->last_rw = UINT_MAX;
        }
}

void dm_stats_cleanup(struct dm_stats *stats)
{
        size_t ni;
        struct dm_stat *s;
        struct dm_stat_shared *shared;

        while (!list_empty(&stats->list)) {
                s = container_of(stats->list.next, struct dm_stat, list_entry);
                list_del(&s->list_entry);
                for (ni = 0; ni < s->n_entries; ni++) {
                        shared = &s->stat_shared[ni];
                        if (WARN_ON(dm_stat_in_flight(shared))) {
                                DMCRIT("leaked in-flight counter at index %lu "
                                       "(start %llu, end %llu, step %llu): reads %d, writes %d",
                                       (unsigned long)ni,
                                       (unsigned long long)s->start,
                                       (unsigned long long)s->end,
                                       (unsigned long long)s->step,
                                       atomic_read(&shared->in_flight[READ]),
                                       atomic_read(&shared->in_flight[WRITE]));
                        }
                        cond_resched();
                }
                dm_stat_free(&s->rcu_head);
        }
        free_percpu(stats->last);
}

static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
                           sector_t step, unsigned stat_flags,
                           unsigned n_histogram_entries,
                           unsigned long long *histogram_boundaries,
                           const char *program_id, const char *aux_data,
                           void (*suspend_callback)(struct mapped_device *),
                           void (*resume_callback)(struct mapped_device *),
                           struct mapped_device *md)
{
        struct list_head *l;
        struct dm_stat *s, *tmp_s;
        sector_t n_entries;
        size_t ni;
        size_t shared_alloc_size;
        size_t percpu_alloc_size;
        size_t histogram_alloc_size;
        struct dm_stat_percpu *p;
        int cpu;
        int ret_id;
        int r;

        if (end < start || !step)
                return -EINVAL;

        n_entries = end - start;
        if (dm_sector_div64(n_entries, step))
                n_entries++;

        if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
                return -EOVERFLOW;

        shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
        if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
                return -EOVERFLOW;

        percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
        if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
                return -EOVERFLOW;

        histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long);
        if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
                return -EOVERFLOW;

        if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
                                 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
                return -ENOMEM;

        s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
        if (!s)
                return -ENOMEM;

        s->stat_flags = stat_flags;
        s->n_entries = n_entries;
        s->start = start;
        s->end = end;
        s->step = step;
        s->shared_alloc_size = shared_alloc_size;
        s->percpu_alloc_size = percpu_alloc_size;
        s->histogram_alloc_size = histogram_alloc_size;

        s->n_histogram_entries = n_histogram_entries;
        s->histogram_boundaries = kmemdup(histogram_boundaries,
                                          s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
        if (!s->histogram_boundaries) {
                r = -ENOMEM;
                goto out;
        }

        s->program_id = kstrdup(program_id, GFP_KERNEL);
        if (!s->program_id) {
                r = -ENOMEM;
                goto out;
        }
        s->aux_data = kstrdup(aux_data, GFP_KERNEL);
        if (!s->aux_data) {
                r = -ENOMEM;
                goto out;
        }

        for (ni = 0; ni < n_entries; ni++) {
                atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
                atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
                cond_resched();
        }

        if (s->n_histogram_entries) {
                unsigned long long *hi;
                hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
                if (!hi) {
                        r = -ENOMEM;
                        goto out;
                }
                for (ni = 0; ni < n_entries; ni++) {
                        s->stat_shared[ni].tmp.histogram = hi;
                        hi += s->n_histogram_entries + 1;
                        cond_resched();
                }
        }

        for_each_possible_cpu(cpu) {
                p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
                if (!p) {
                        r = -ENOMEM;
                        goto out;
                }
                s->stat_percpu[cpu] = p;
                if (s->n_histogram_entries) {
                        unsigned long long *hi;
                        hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
                        if (!hi) {
                                r = -ENOMEM;
                                goto out;
                        }
                        for (ni = 0; ni < n_entries; ni++) {
                                p[ni].histogram = hi;
                                hi += s->n_histogram_entries + 1;
                                cond_resched();
                        }
                }
        }

        /*
         * Suspend/resume to make sure there is no i/o in flight,
         * so that newly created statistics will be exact.
         *
         * (note: we couldn't suspend earlier because we must not
         * allocate memory while suspended)
         */
        suspend_callback(md);

        mutex_lock(&stats->mutex);
        s->id = 0;
        list_for_each(l, &stats->list) {
                tmp_s = container_of(l, struct dm_stat, list_entry);
                if (WARN_ON(tmp_s->id < s->id)) {
                        r = -EINVAL;
                        goto out_unlock_resume;
                }
                if (tmp_s->id > s->id)
                        break;
                if (unlikely(s->id == INT_MAX)) {
                        r = -ENFILE;
                        goto out_unlock_resume;
                }
                s->id++;
        }
        ret_id = s->id;
        list_add_tail_rcu(&s->list_entry, l);
        mutex_unlock(&stats->mutex);

        resume_callback(md);

        return ret_id;

out_unlock_resume:
        mutex_unlock(&stats->mutex);
        resume_callback(md);
out:
        dm_stat_free(&s->rcu_head);
        return r;
}
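
/*
 * Worked example (values assumed for illustration): creating a region with
 * start=0, end=1000 and step=100 sectors gives n_entries = 10 areas.  The
 * single shared allocation then holds one struct dm_stat plus ten
 * dm_stat_shared entries, every possible CPU gets an array of ten
 * dm_stat_percpu counters, and with three histogram boundaries each area
 * additionally gets 3 + 1 = 4 histogram buckets per CPU plus one shared set
 * used for reporting.
 */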

static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
{
        struct dm_stat *s;

        list_for_each_entry(s, &stats->list, list_entry) {
                if (s->id > id)
                        break;
                if (s->id == id)
                        return s;
        }

        return NULL;
}

static int dm_stats_delete(struct dm_stats *stats, int id)
{
        struct dm_stat *s;
        int cpu;

        mutex_lock(&stats->mutex);

        s = __dm_stats_find(stats, id);
        if (!s) {
                mutex_unlock(&stats->mutex);
                return -ENOENT;
        }

        list_del_rcu(&s->list_entry);
        mutex_unlock(&stats->mutex);

        /*
         * vfree can't be called from RCU callback
         */
        for_each_possible_cpu(cpu)
                if (is_vmalloc_addr(s->stat_percpu) ||
                    is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
                        goto do_sync_free;
        if (is_vmalloc_addr(s) ||
            is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
do_sync_free:
                synchronize_rcu_expedited();
                dm_stat_free(&s->rcu_head);
        } else {
                ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
                call_rcu(&s->rcu_head, dm_stat_free);
        }
        return 0;
}

static int dm_stats_list(struct dm_stats *stats, const char *program,
                         char *result, unsigned maxlen)
{
        struct dm_stat *s;
        sector_t len;
        unsigned sz = 0;

        /*
         * Output format:
         *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
         */

        mutex_lock(&stats->mutex);
        list_for_each_entry(s, &stats->list, list_entry) {
                if (!program || !strcmp(program, s->program_id)) {
                        len = s->end - s->start;
                        DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
                                (unsigned long long)s->start,
                                (unsigned long long)len,
                                (unsigned long long)s->step,
                                s->program_id,
                                s->aux_data);
                        if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
                                DMEMIT(" precise_timestamps");
                        if (s->n_histogram_entries) {
                                unsigned i;
                                DMEMIT(" histogram:");
                                for (i = 0; i < s->n_histogram_entries; i++) {
                                        if (i)
                                                DMEMIT(",");
                                        DMEMIT("%llu", s->histogram_boundaries[i]);
                                }
                        }
                        DMEMIT("\n");
                }
                cond_resched();
        }
        mutex_unlock(&stats->mutex);

        return 1;
}

static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
                          struct dm_stat_percpu *p)
{
        /*
         * This is racy, but so is part_round_stats_single.
         */
        unsigned long long now, difference;
        unsigned in_flight_read, in_flight_write;

        if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
                now = jiffies;
        else
                now = ktime_to_ns(ktime_get());

        difference = now - shared->stamp;
        if (!difference)
                return;

        in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
        in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
        if (in_flight_read)
                p->io_ticks[READ] += difference;
        if (in_flight_write)
                p->io_ticks[WRITE] += difference;
        if (in_flight_read + in_flight_write) {
                p->io_ticks_total += difference;
                p->time_in_queue += (in_flight_read + in_flight_write) * difference;
        }
        shared->stamp = now;
}
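
/*
 * Example (assumed numbers): if two reads and one write are in flight and
 * dm_stat_round() runs 5 ticks after the last stamp, it adds 5 to both
 * io_ticks[READ] and io_ticks[WRITE], 5 to io_ticks_total, and 3 * 5 = 15 to
 * time_in_queue, then moves the stamp forward.
 */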

static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
                              int idx, sector_t len,
                              struct dm_stats_aux *stats_aux, bool end,
                              unsigned long duration_jiffies)
{
        struct dm_stat_shared *shared = &s->stat_shared[entry];
        struct dm_stat_percpu *p;

        /*
         * For strict correctness we should use local_irq_save/restore
         * instead of preempt_disable/enable.
         *
         * preempt_disable/enable is racy if the driver finishes bios
         * from non-interrupt context as well as from interrupt context
         * or from several different interrupts.
         *
         * On 64-bit architectures the race only results in not counting some
         * events, so it is acceptable.  On 32-bit architectures the race could
         * cause a counter to go off by 2^32, so we need to do proper locking
         * there.
         *
         * part_stat_lock()/part_stat_unlock() have this race too.
         */
#if BITS_PER_LONG == 32
        unsigned long flags;
        local_irq_save(flags);
#else
        preempt_disable();
#endif
        p = &s->stat_percpu[smp_processor_id()][entry];

        if (!end) {
                dm_stat_round(s, shared, p);
                atomic_inc(&shared->in_flight[idx]);
        } else {
                unsigned long long duration;
                dm_stat_round(s, shared, p);
                atomic_dec(&shared->in_flight[idx]);
                p->sectors[idx] += len;
                p->ios[idx] += 1;
                p->merges[idx] += stats_aux->merged;
                if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) {
                        p->ticks[idx] += duration_jiffies;
                        duration = jiffies_to_msecs(duration_jiffies);
                } else {
                        p->ticks[idx] += stats_aux->duration_ns;
                        duration = stats_aux->duration_ns;
                }
                if (s->n_histogram_entries) {
                        unsigned lo = 0, hi = s->n_histogram_entries + 1;
                        while (lo + 1 < hi) {
                                unsigned mid = (lo + hi) / 2;
                                if (s->histogram_boundaries[mid - 1] > duration) {
                                        hi = mid;
                                } else {
                                        lo = mid;
                                }

                        }
                        p->histogram[lo]++;
                }
        }

#if BITS_PER_LONG == 32
        local_irq_restore(flags);
#else
        preempt_enable();
#endif
}
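
/*
 * Histogram example (assumed boundaries): with histogram_boundaries = {1, 2, 5}
 * (n_histogram_entries = 3) there are four buckets: duration < 1, [1, 2),
 * [2, 5) and >= 5.  The binary search above maps a duration of 3 to bucket
 * index 2, i.e. p->histogram[2] is incremented.
 */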

static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
                          sector_t bi_sector, sector_t end_sector,
                          bool end, unsigned long duration_jiffies,
                          struct dm_stats_aux *stats_aux)
{
        sector_t rel_sector, offset, todo, fragment_len;
        size_t entry;

        if (end_sector <= s->start || bi_sector >= s->end)
                return;
        if (unlikely(bi_sector < s->start)) {
                rel_sector = 0;
                todo = end_sector - s->start;
        } else {
                rel_sector = bi_sector - s->start;
                todo = end_sector - bi_sector;
        }
        if (unlikely(end_sector > s->end))
                todo -= (end_sector - s->end);

        offset = dm_sector_div64(rel_sector, s->step);
        entry = rel_sector;
        do {
                if (WARN_ON_ONCE(entry >= s->n_entries)) {
                        DMCRIT("Invalid area access in region id %d", s->id);
                        return;
                }
                fragment_len = todo;
                if (fragment_len > s->step - offset)
                        fragment_len = s->step - offset;
                dm_stat_for_entry(s, entry, bi_rw, fragment_len,
                                  stats_aux, end, duration_jiffies);
                todo -= fragment_len;
                entry++;
                offset = 0;
        } while (unlikely(todo != 0));
}
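
/*
 * Splitting example (assumed geometry): for a region with start=0, end=1000,
 * step=100, a bio covering sectors 150..349 (bi_sector=150, end_sector=350)
 * yields rel_sector=150, which dm_sector_div64() turns into entry=1 with
 * offset=50.  The loop then accounts 50 sectors to area 1, 100 sectors to
 * area 2 and 50 sectors to area 3.
 */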

void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
                         sector_t bi_sector, unsigned bi_sectors, bool end,
                         unsigned long duration_jiffies,
                         struct dm_stats_aux *stats_aux)
{
        struct dm_stat *s;
        sector_t end_sector;
        struct dm_stats_last_position *last;
        bool got_precise_time;

        if (unlikely(!bi_sectors))
                return;

        end_sector = bi_sector + bi_sectors;

        if (!end) {
                /*
                 * A race condition can at worst result in the merged flag being
                 * misrepresented, so we don't have to disable preemption here.
                 */
                last = raw_cpu_ptr(stats->last);
                stats_aux->merged =
                        ((bi_sector == ACCESS_ONCE(last->last_sector)) &&
                         ((bi_rw == WRITE) ==
                          (ACCESS_ONCE(last->last_rw) == WRITE)));
                ACCESS_ONCE(last->last_sector) = end_sector;
                ACCESS_ONCE(last->last_rw) = bi_rw;
        }

        rcu_read_lock();

        got_precise_time = false;
        list_for_each_entry_rcu(s, &stats->list, list_entry) {
                if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
                        if (!end)
                                stats_aux->duration_ns = ktime_to_ns(ktime_get());
                        else
                                stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
                        got_precise_time = true;
                }
                __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);
        }

        rcu_read_unlock();
}
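
/*
 * Note on precise timestamps: stats_aux->duration_ns does double duty.  When
 * a bio is submitted (end == false) it records the ktime_get() start stamp;
 * when the bio completes it is overwritten with the difference, i.e. the
 * nanosecond duration.  The got_precise_time flag ensures ktime_get() is
 * called at most once per bio even when several precise regions are listed.
 */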

static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
                                                   struct dm_stat *s, size_t x)
{
        int cpu;
        struct dm_stat_percpu *p;

        local_irq_disable();
        p = &s->stat_percpu[smp_processor_id()][x];
        dm_stat_round(s, shared, p);
        local_irq_enable();

        shared->tmp.sectors[READ] = 0;
        shared->tmp.sectors[WRITE] = 0;
        shared->tmp.ios[READ] = 0;
        shared->tmp.ios[WRITE] = 0;
        shared->tmp.merges[READ] = 0;
        shared->tmp.merges[WRITE] = 0;
        shared->tmp.ticks[READ] = 0;
        shared->tmp.ticks[WRITE] = 0;
        shared->tmp.io_ticks[READ] = 0;
        shared->tmp.io_ticks[WRITE] = 0;
        shared->tmp.io_ticks_total = 0;
        shared->tmp.time_in_queue = 0;

        if (s->n_histogram_entries)
                memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long));

        for_each_possible_cpu(cpu) {
                p = &s->stat_percpu[cpu][x];
                shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
                shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
                shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
                shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
                shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
                shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
                shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
                shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
                shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
                shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
                shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
                shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
                if (s->n_histogram_entries) {
                        unsigned i;
                        for (i = 0; i < s->n_histogram_entries + 1; i++)
                                shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
                }
        }
}

static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
                            bool init_tmp_percpu_totals)
{
        size_t x;
        struct dm_stat_shared *shared;
        struct dm_stat_percpu *p;

        for (x = idx_start; x < idx_end; x++) {
                shared = &s->stat_shared[x];
                if (init_tmp_percpu_totals)
                        __dm_stat_init_temporary_percpu_totals(shared, s, x);
                local_irq_disable();
                p = &s->stat_percpu[smp_processor_id()][x];
                p->sectors[READ] -= shared->tmp.sectors[READ];
                p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
                p->ios[READ] -= shared->tmp.ios[READ];
                p->ios[WRITE] -= shared->tmp.ios[WRITE];
                p->merges[READ] -= shared->tmp.merges[READ];
                p->merges[WRITE] -= shared->tmp.merges[WRITE];
                p->ticks[READ] -= shared->tmp.ticks[READ];
                p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
                p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
                p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
                p->io_ticks_total -= shared->tmp.io_ticks_total;
                p->time_in_queue -= shared->tmp.time_in_queue;
                local_irq_enable();
                if (s->n_histogram_entries) {
                        unsigned i;
                        for (i = 0; i < s->n_histogram_entries + 1; i++) {
                                local_irq_disable();
                                p = &s->stat_percpu[smp_processor_id()][x];
                                p->histogram[i] -= shared->tmp.histogram[i];
                                local_irq_enable();
                        }
                }
                cond_resched();
        }
}

static int dm_stats_clear(struct dm_stats *stats, int id)
{
        struct dm_stat *s;

        mutex_lock(&stats->mutex);

        s = __dm_stats_find(stats, id);
        if (!s) {
                mutex_unlock(&stats->mutex);
                return -ENOENT;
        }

        __dm_stat_clear(s, 0, s->n_entries, true);

        mutex_unlock(&stats->mutex);

        return 1;
}

/*
 * This is like jiffies_to_msecs(), but works for 64-bit values.
 */
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
        unsigned long long result;
        unsigned mult;

        if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
                return j;

        result = 0;
        if (j)
                result = jiffies_to_msecs(j & 0x3fffff);
        if (j >= 1 << 22) {
                mult = jiffies_to_msecs(1 << 22);
                result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
        }
        if (j >= 1ULL << 44)
                result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

        return result;
}
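
/*
 * The 64-bit jiffies value is split into 22-bit chunks,
 * j = lo + mid * 2^22 + hi * 2^44; each chunk is converted with the 32-bit
 * jiffies_to_msecs() and the results are recombined using
 * mult = jiffies_to_msecs(1 << 22) as the per-chunk scale factor, so no
 * 64-bit division is needed.  With HZ == 1000 (one jiffy per millisecond)
 * this reproduces jiffies_to_msecs() exactly.
 */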

static int dm_stats_print(struct dm_stats *stats, int id,
                          size_t idx_start, size_t idx_len,
                          bool clear, char *result, unsigned maxlen)
{
        unsigned sz = 0;
        struct dm_stat *s;
        size_t x;
        sector_t start, end, step;
        size_t idx_end;
        struct dm_stat_shared *shared;

        /*
         * Output format:
         *   <start_sector>+<length> counters
         */

        mutex_lock(&stats->mutex);

        s = __dm_stats_find(stats, id);
        if (!s) {
                mutex_unlock(&stats->mutex);
                return -ENOENT;
        }

        idx_end = idx_start + idx_len;
        if (idx_end < idx_start ||
            idx_end > s->n_entries)
                idx_end = s->n_entries;

        if (idx_start > idx_end)
                idx_start = idx_end;

        step = s->step;
        start = s->start + (step * idx_start);

        for (x = idx_start; x < idx_end; x++, start = end) {
                shared = &s->stat_shared[x];
                end = start + step;
                if (unlikely(end > s->end))
                        end = s->end;

                __dm_stat_init_temporary_percpu_totals(shared, s, x);

                DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
                       (unsigned long long)start,
                       (unsigned long long)step,
                       shared->tmp.ios[READ],
                       shared->tmp.merges[READ],
                       shared->tmp.sectors[READ],
                       dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
                       shared->tmp.ios[WRITE],
                       shared->tmp.merges[WRITE],
                       shared->tmp.sectors[WRITE],
                       dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
                       dm_stat_in_flight(shared),
                       dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
                       dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
                       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
                       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
                if (s->n_histogram_entries) {
                        unsigned i;
                        for (i = 0; i < s->n_histogram_entries + 1; i++) {
                                DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
                        }
                }
                DMEMIT("\n");

                if (unlikely(sz + 1 >= maxlen))
                        goto buffer_overflow;

                cond_resched();
        }

        if (clear)
                __dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
        mutex_unlock(&stats->mutex);

        return 1;
}
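
/*
 * For each area the DMEMIT() above prints, in order: reads completed, reads
 * merged, sectors read, time spent reading, writes completed, writes merged,
 * sectors written, time spent writing, I/Os currently in flight, time with
 * any I/O in flight, weighted time in queue, time with at least one read in
 * flight, and time with at least one write in flight; times are in
 * milliseconds, or nanoseconds for regions created with precise_timestamps.
 * The optional histogram buckets follow, separated by ':'.
 */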

static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
{
        struct dm_stat *s;
        const char *new_aux_data;

        mutex_lock(&stats->mutex);

        s = __dm_stats_find(stats, id);
        if (!s) {
                mutex_unlock(&stats->mutex);
                return -ENOENT;
        }

        new_aux_data = kstrdup(aux_data, GFP_KERNEL);
        if (!new_aux_data) {
                mutex_unlock(&stats->mutex);
                return -ENOMEM;
        }

        kfree(s->aux_data);
        s->aux_data = new_aux_data;

        mutex_unlock(&stats->mutex);

        return 0;
}

static int parse_histogram(const char *h, unsigned *n_histogram_entries,
                           unsigned long long **histogram_boundaries)
{
        const char *q;
        unsigned n;
        unsigned long long last;

        *n_histogram_entries = 1;
        for (q = h; *q; q++)
                if (*q == ',')
                        (*n_histogram_entries)++;

        *histogram_boundaries = kmalloc(*n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
        if (!*histogram_boundaries)
                return -ENOMEM;

        n = 0;
        last = 0;
        while (1) {
                unsigned long long hi;
                int s;
                char ch;
                s = sscanf(h, "%llu%c", &hi, &ch);
                if (!s || (s == 2 && ch != ','))
                        return -EINVAL;
                if (hi <= last)
                        return -EINVAL;
                last = hi;
                (*histogram_boundaries)[n] = hi;
                if (s == 1)
                        return 0;
                h = strchr(h, ',') + 1;
                n++;
        }
}
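
/*
 * Example: the argument "histogram:1,10,100" reaches parse_histogram() as
 * "1,10,100" and produces n_histogram_entries = 3 with strictly increasing
 * boundaries {1, 10, 100}, which in turn creates four buckets per area
 * (below 1, 1-9, 10-99, and 100 or more, in milliseconds or in nanoseconds
 * for precise_timestamps regions).
 */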

static int message_stats_create(struct mapped_device *md,
                                unsigned argc, char **argv,
                                char *result, unsigned maxlen)
{
        int r;
        int id;
        char dummy;
        unsigned long long start, end, len, step;
        unsigned divisor;
        const char *program_id, *aux_data;
        unsigned stat_flags = 0;

        unsigned n_histogram_entries = 0;
        unsigned long long *histogram_boundaries = NULL;

        struct dm_arg_set as, as_backup;
        const char *a;
        unsigned feature_args;

        /*
         * Input format:
         *   <range> <step> [<extra_parameters> <parameters>] [<program_id> [<aux_data>]]
         */

        if (argc < 3)
                goto ret_einval;

        as.argc = argc;
        as.argv = argv;
        dm_consume_args(&as, 1);

        a = dm_shift_arg(&as);
        if (!strcmp(a, "-")) {
                start = 0;
                len = dm_get_size(md);
                if (!len)
                        len = 1;
        } else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 ||
                   start != (sector_t)start || len != (sector_t)len)
                goto ret_einval;

        end = start + len;
        if (start >= end)
                goto ret_einval;

        a = dm_shift_arg(&as);
        if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) {
                if (!divisor)
                        return -EINVAL;
                step = end - start;
                if (do_div(step, divisor))
                        step++;
                if (!step)
                        step = 1;
        } else if (sscanf(a, "%llu%c", &step, &dummy) != 1 ||
                   step != (sector_t)step || !step)
                goto ret_einval;

        as_backup = as;
        a = dm_shift_arg(&as);
        if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) {
                while (feature_args--) {
                        a = dm_shift_arg(&as);
                        if (!a)
                                goto ret_einval;
                        if (!strcasecmp(a, "precise_timestamps"))
                                stat_flags |= STAT_PRECISE_TIMESTAMPS;
                        else if (!strncasecmp(a, "histogram:", 10)) {
                                if (n_histogram_entries)
                                        goto ret_einval;
                                if ((r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries)))
                                        goto ret;
                        } else
                                goto ret_einval;
                }
        } else {
                as = as_backup;
        }

        program_id = "-";
        aux_data = "-";

        a = dm_shift_arg(&as);
        if (a)
                program_id = a;

        a = dm_shift_arg(&as);
        if (a)
                aux_data = a;

        if (as.argc)
                goto ret_einval;

        /*
         * If a buffer overflow happens after we created the region,
         * it's too late (the userspace would retry with a larger
         * buffer, but the region id that caused the overflow is already
         * leaked).  So we must detect buffer overflow in advance.
         */
        snprintf(result, maxlen, "%d", INT_MAX);
        if (dm_message_test_buffer_overflow(result, maxlen)) {
                r = 1;
                goto ret;
        }

        id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags,
                             n_histogram_entries, histogram_boundaries, program_id, aux_data,
                             dm_internal_suspend_fast, dm_internal_resume_fast, md);
        if (id < 0) {
                r = id;
                goto ret;
        }

        snprintf(result, maxlen, "%d", id);

        r = 1;
        goto ret;

ret_einval:
        r = -EINVAL;
ret:
        kfree(histogram_boundaries);
        return r;
}
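
/*
 * Illustrative usage from user space (device name and numbers are assumed;
 * see Documentation/device-mapper/statistics.txt for the authoritative
 * syntax):
 *
 *   # divide the whole device into 100 equally sized areas
 *   dmsetup message mydev 0 @stats_create - /100
 *
 *   # whole device, one area, precise timestamps and a latency histogram
 *   dmsetup message mydev 0 @stats_create - /1 2 precise_timestamps histogram:1,10,100
 *
 * The message prints the new region_id, which is later passed to
 * @stats_print, @stats_clear and @stats_delete.
 */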

static int message_stats_delete(struct mapped_device *md,
                                unsigned argc, char **argv)
{
        int id;
        char dummy;

        if (argc != 2)
                return -EINVAL;

        if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
                return -EINVAL;

        return dm_stats_delete(dm_get_stats(md), id);
}

static int message_stats_clear(struct mapped_device *md,
                               unsigned argc, char **argv)
{
        int id;
        char dummy;

        if (argc != 2)
                return -EINVAL;

        if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
                return -EINVAL;

        return dm_stats_clear(dm_get_stats(md), id);
}

static int message_stats_list(struct mapped_device *md,
                              unsigned argc, char **argv,
                              char *result, unsigned maxlen)
{
        int r;
        const char *program = NULL;

        if (argc < 1 || argc > 2)
                return -EINVAL;

        if (argc > 1) {
                program = kstrdup(argv[1], GFP_KERNEL);
                if (!program)
                        return -ENOMEM;
        }

        r = dm_stats_list(dm_get_stats(md), program, result, maxlen);

        kfree(program);

        return r;
}

static int message_stats_print(struct mapped_device *md,
                               unsigned argc, char **argv, bool clear,
                               char *result, unsigned maxlen)
{
        int id;
        char dummy;
        unsigned long idx_start = 0, idx_len = ULONG_MAX;

        if (argc != 2 && argc != 4)
                return -EINVAL;

        if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
                return -EINVAL;

        if (argc > 3) {
                if (strcmp(argv[2], "-") &&
                    sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
                        return -EINVAL;
                if (strcmp(argv[3], "-") &&
                    sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
                        return -EINVAL;
        }

        return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
                              result, maxlen);
}

static int message_stats_set_aux(struct mapped_device *md,
                                 unsigned argc, char **argv)
{
        int id;
        char dummy;

        if (argc != 3)
                return -EINVAL;

        if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
                return -EINVAL;

        return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}

int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
                     char *result, unsigned maxlen)
{
        int r;

        /* All messages here must start with '@' */
        if (!strcasecmp(argv[0], "@stats_create"))
                r = message_stats_create(md, argc, argv, result, maxlen);
        else if (!strcasecmp(argv[0], "@stats_delete"))
                r = message_stats_delete(md, argc, argv);
        else if (!strcasecmp(argv[0], "@stats_clear"))
                r = message_stats_clear(md, argc, argv);
        else if (!strcasecmp(argv[0], "@stats_list"))
                r = message_stats_list(md, argc, argv, result, maxlen);
        else if (!strcasecmp(argv[0], "@stats_print"))
                r = message_stats_print(md, argc, argv, false, result, maxlen);
        else if (!strcasecmp(argv[0], "@stats_print_clear"))
                r = message_stats_print(md, argc, argv, true, result, maxlen);
        else if (!strcasecmp(argv[0], "@stats_set_aux"))
                r = message_stats_set_aux(md, argc, argv);
        else
                return 2; /* this wasn't a stats message */

        if (r == -EINVAL)
                DMWARN("Invalid parameters for message %s", argv[0]);

        return r;
}

int __init dm_statistics_init(void)
{
        shared_memory_amount = 0;
        dm_stat_need_rcu_barrier = 0;
        return 0;
}

void dm_statistics_exit(void)
{
        if (dm_stat_need_rcu_barrier)
                rcu_barrier();
        if (WARN_ON(shared_memory_amount))
                DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}

module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");