1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * numa.c
4  *
5  * numa: Simulate a NUMA-sensitive workload and measure its NUMA performance
6  */
7
8 #include <inttypes.h>
9 /* For the CLR_() macros */
10 #include <pthread.h>
11
12 #include <subcmd/parse-options.h>
13 #include "../util/cloexec.h"
14
15 #include "bench.h"
16
17 #include <errno.h>
18 #include <sched.h>
19 #include <stdio.h>
20 #include <assert.h>
21 #include <malloc.h>
22 #include <signal.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <unistd.h>
26 #include <sys/mman.h>
27 #include <sys/time.h>
28 #include <sys/resource.h>
29 #include <sys/wait.h>
30 #include <sys/prctl.h>
31 #include <sys/types.h>
32 #include <linux/kernel.h>
33 #include <linux/time64.h>
34 #include <linux/numa.h>
35 #include <linux/zalloc.h>
36
37 #include "../util/header.h"
38 #include <numa.h>
39 #include <numaif.h>
40
41 #ifndef RUSAGE_THREAD
42 # define RUSAGE_THREAD 1
43 #endif
44
45 /*
46  * Regular printout to the terminal, suppressed if -q is specified:
47  */
48 #define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)
49
50 /*
51  * Debug printf:
52  */
53 #undef dprintf
54 #define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)
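/*
 * How the two levels interact (see the -d/-q options and init() below):
 * -q without any -d drops show_details to -1, which silences tprintf();
 * each -d raises show_details, and a level >= 1 additionally enables
 * dprintf().
 */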
55
56 struct thread_data {
57         int                     curr_cpu;
58         cpu_set_t               *bind_cpumask;
59         int                     bind_node;
60         u8                      *process_data;
61         int                     process_nr;
62         int                     thread_nr;
63         int                     task_nr;
64         unsigned int            loops_done;
65         u64                     val;
66         u64                     runtime_ns;
67         u64                     system_time_ns;
68         u64                     user_time_ns;
69         double                  speed_gbs;
70         pthread_mutex_t         *process_lock;
71 };
72
73 /* Parameters set by options: */
74
75 struct params {
76         /* Startup synchronization: */
77         bool                    serialize_startup;
78
79         /* Task hierarchy: */
80         int                     nr_proc;
81         int                     nr_threads;
82
83         /* Working set sizes: */
84         const char              *mb_global_str;
85         const char              *mb_proc_str;
86         const char              *mb_proc_locked_str;
87         const char              *mb_thread_str;
88
89         double                  mb_global;
90         double                  mb_proc;
91         double                  mb_proc_locked;
92         double                  mb_thread;
93
94         /* Access patterns to the working set: */
95         bool                    data_reads;
96         bool                    data_writes;
97         bool                    data_backwards;
98         bool                    data_zero_memset;
99         bool                    data_rand_walk;
100         u32                     nr_loops;
101         u32                     nr_secs;
102         u32                     sleep_usecs;
103
104         /* Working set initialization: */
105         bool                    init_zero;
106         bool                    init_random;
107         bool                    init_cpu0;
108
109         /* Misc options: */
110         int                     show_details;
111         int                     run_all;
112         int                     thp;
113
114         long                    bytes_global;
115         long                    bytes_process;
116         long                    bytes_process_locked;
117         long                    bytes_thread;
118
119         int                     nr_tasks;
120         bool                    show_quiet;
121
122         bool                    show_convergence;
123         bool                    measure_convergence;
124
125         int                     perturb_secs;
126         int                     nr_cpus;
127         int                     nr_nodes;
128
129         /* Affinity options -C and -N: */
130         char                    *cpu_list_str;
131         char                    *node_list_str;
132 };
133
134
135 /* Global, read-writable area, accessible to all processes and threads: */
136
137 struct global_info {
138         u8                      *data;
139
140         pthread_mutex_t         startup_mutex;
141         pthread_cond_t          startup_cond;
142         int                     nr_tasks_started;
143
144         pthread_mutex_t         start_work_mutex;
145         pthread_cond_t          start_work_cond;
146         int                     nr_tasks_working;
147         bool                    start_work;
148
149         pthread_mutex_t         stop_work_mutex;
150         u64                     bytes_done;
151
152         struct thread_data      *threads;
153
154         /* Convergence latency measurement: */
155         bool                    all_converged;
156         bool                    stop_work;
157
158         int                     print_once;
159
160         struct params           p;
161 };
162
163 static struct global_info       *g = NULL;
164
165 static int parse_cpus_opt(const struct option *opt, const char *arg, int unset);
166 static int parse_nodes_opt(const struct option *opt, const char *arg, int unset);
167
168 struct params p0;
169
170 static const struct option options[] = {
171         OPT_INTEGER('p', "nr_proc"      , &p0.nr_proc,          "number of processes"),
172         OPT_INTEGER('t', "nr_threads"   , &p0.nr_threads,       "number of threads per process"),
173
174         OPT_STRING('G', "mb_global"     , &p0.mb_global_str,    "MB", "global  memory (MBs)"),
175         OPT_STRING('P', "mb_proc"       , &p0.mb_proc_str,      "MB", "process memory (MBs)"),
176         OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"),
177         OPT_STRING('T', "mb_thread"     , &p0.mb_thread_str,    "MB", "thread  memory (MBs)"),
178
179         OPT_UINTEGER('l', "nr_loops"    , &p0.nr_loops,         "max number of loops to run (default: unlimited)"),
180         OPT_UINTEGER('s', "nr_secs"     , &p0.nr_secs,          "max number of seconds to run (default: 5 secs)"),
181         OPT_UINTEGER('u', "usleep"      , &p0.sleep_usecs,      "usecs to sleep per loop iteration"),
182
183         OPT_BOOLEAN('R', "data_reads"   , &p0.data_reads,       "access the data via reads (can be mixed with -W)"),
184         OPT_BOOLEAN('W', "data_writes"  , &p0.data_writes,      "access the data via writes (can be mixed with -R)"),
185         OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards,  "access the data backwards as well"),
186         OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"),
187         OPT_BOOLEAN('r', "data_rand_walk", &p0.data_rand_walk,  "access the data with random (32bit LFSR) walk"),
188
189
190         OPT_BOOLEAN('z', "init_zero"    , &p0.init_zero,        "bzero the initial allocations"),
191         OPT_BOOLEAN('I', "init_random"  , &p0.init_random,      "randomize the contents of the initial allocations"),
192         OPT_BOOLEAN('0', "init_cpu0"    , &p0.init_cpu0,        "do the initial allocations on CPU#0"),
193         OPT_INTEGER('x', "perturb_secs", &p0.perturb_secs,      "perturb thread 0/0 every X secs, to test convergence stability"),
194
195         OPT_INCR   ('d', "show_details" , &p0.show_details,     "Show details"),
196         OPT_INCR   ('a', "all"          , &p0.run_all,          "Run all tests in the suite"),
197         OPT_INTEGER('H', "thp"          , &p0.thp,              "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"),
198         OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details, "
199                     "convergence is reached when each process (all its threads) is running on a single NUMA node."),
200         OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"),
201         OPT_BOOLEAN('q', "quiet"        , &p0.show_quiet,       "quiet mode"),
202         OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),
203
204         /* Special option string parsing callbacks: */
205         OPT_CALLBACK('C', "cpus", NULL, "cpu[,cpu2,...cpuN]",
206                         "bind the first N tasks to these specific cpus (the rest is unbound)",
207                         parse_cpus_opt),
208         OPT_CALLBACK('M', "memnodes", NULL, "node[,node2,...nodeN]",
209                         "bind the first N tasks to these specific memory nodes (the rest is unbound)",
210                         parse_nodes_opt),
211         OPT_END()
212 };
213
214 static const char * const bench_numa_usage[] = {
215         "perf bench numa <options>",
216         NULL
217 };
218
219 static const char * const numa_usage[] = {
220         "perf bench numa mem [<options>]",
221         NULL
222 };
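/*
 * A few illustrative invocations built from the options above (examples
 * only, not an exhaustive list):
 *
 *   perf bench numa mem -p 2 -t 4 -P 1024 -s 20
 *       2 processes x 4 threads, 1024 MB per process, 20 seconds max
 *
 *   perf bench numa mem -p 4 -t 8 -G 4096 -z -Z
 *       4 GB global shared buffer, zero-initialized, accessed via bzero
 *
 *   perf bench numa mem -p 2 -t 2 -T 256 -C 0-7
 *       256 MB per thread, the first tasks bound to CPUs 0-7
 */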
223
224 /*
225  * Return the number of NUMA nodes present in the system.
226  */
227 static int nr_numa_nodes(void)
228 {
229         int i, nr_nodes = 0;
230
231         for (i = 0; i < g->p.nr_nodes; i++) {
232                 if (numa_bitmask_isbitset(numa_nodes_ptr, i))
233                         nr_nodes++;
234         }
235
236         return nr_nodes;
237 }
238
239 /*
240  * Check whether the given NUMA node is present.
241  */
242 static int is_node_present(int node)
243 {
244         return numa_bitmask_isbitset(numa_nodes_ptr, node);
245 }
246
247 /*
248  * Check whether the given NUMA node has any CPUs.
249  */
250 static bool node_has_cpus(int node)
251 {
252         struct bitmask *cpumask = numa_allocate_cpumask();
253         bool ret = false; /* fall back to nocpus */
254         int cpu;
255
256         BUG_ON(!cpumask);
257         if (!numa_node_to_cpus(node, cpumask)) {
258                 for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
259                         if (numa_bitmask_isbitset(cpumask, cpu)) {
260                                 ret = true;
261                                 break;
262                         }
263                 }
264         }
265         numa_free_cpumask(cpumask);
266
267         return ret;
268 }
269
270 static cpu_set_t *bind_to_cpu(int target_cpu)
271 {
272         int nrcpus = numa_num_possible_cpus();
273         cpu_set_t *orig_mask, *mask;
274         size_t size;
275
276         orig_mask = CPU_ALLOC(nrcpus);
277         BUG_ON(!orig_mask);
278         size = CPU_ALLOC_SIZE(nrcpus);
279         CPU_ZERO_S(size, orig_mask);
280
281         if (sched_getaffinity(0, size, orig_mask))
282                 goto err_out;
283
284         mask = CPU_ALLOC(nrcpus);
285         if (!mask)
286                 goto err_out;
287
288         CPU_ZERO_S(size, mask);
289
290         if (target_cpu == -1) {
291                 int cpu;
292
293                 for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
294                         CPU_SET_S(cpu, size, mask);
295         } else {
296                 if (target_cpu < 0 || target_cpu >= g->p.nr_cpus)
297                         goto err;
298
299                 CPU_SET_S(target_cpu, size, mask);
300         }
301
302         if (sched_setaffinity(0, size, mask))
303                 goto err;
304
305         return orig_mask;
306
307 err:
308         CPU_FREE(mask);
309 err_out:
310         CPU_FREE(orig_mask);
311
312         /* BUG_ON: CPU mask allocation or a sched_{get,set}affinity() call failed */
313         BUG_ON(-1);
314         return NULL;
315 }
316
317 static cpu_set_t *bind_to_node(int target_node)
318 {
319         int nrcpus = numa_num_possible_cpus();
320         size_t size;
321         cpu_set_t *orig_mask, *mask;
322         int cpu;
323
324         orig_mask = CPU_ALLOC(nrcpus);
325         BUG_ON(!orig_mask);
326         size = CPU_ALLOC_SIZE(nrcpus);
327         CPU_ZERO_S(size, orig_mask);
328
329         if (sched_getaffinity(0, size, orig_mask))
330                 goto err_out;
331
332         mask = CPU_ALLOC(nrcpus);
333         if (!mask)
334                 goto err_out;
335
336         CPU_ZERO_S(size, mask);
337
338         if (target_node == NUMA_NO_NODE) {
339                 for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
340                         CPU_SET_S(cpu, size, mask);
341         } else {
342                 struct bitmask *cpumask = numa_allocate_cpumask();
343
344                 if (!cpumask)
345                         goto err;
346
347                 if (!numa_node_to_cpus(target_node, cpumask)) {
348                         for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
349                                 if (numa_bitmask_isbitset(cpumask, cpu))
350                                         CPU_SET_S(cpu, size, mask);
351                         }
352                 }
353                 numa_free_cpumask(cpumask);
354         }
355
356         if (sched_setaffinity(0, size, mask))
357                 goto err;
358
359         return orig_mask;
360
361 err:
362         CPU_FREE(mask);
363 err_out:
364         CPU_FREE(orig_mask);
365
366         /* BUG_ON: CPU mask allocation or a sched_{get,set}affinity() call failed */
367         BUG_ON(-1);
368         return NULL;
369 }
370
371 static void bind_to_cpumask(cpu_set_t *mask)
372 {
373         int ret;
374         size_t size = CPU_ALLOC_SIZE(numa_num_possible_cpus());
375
376         ret = sched_setaffinity(0, size, mask);
377         if (ret) {
378                 CPU_FREE(mask);
379                 BUG_ON(ret);
380         }
381 }
382
383 static void mempol_restore(void)
384 {
385         int ret;
386
387         ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1);
388
389         BUG_ON(ret);
390 }
391
392 static void bind_to_memnode(int node)
393 {
394         struct bitmask *node_mask;
395         int ret;
396
397         if (node == NUMA_NO_NODE)
398                 return;
399
400         node_mask = numa_allocate_nodemask();
401         BUG_ON(!node_mask);
402
403         numa_bitmask_clearall(node_mask);
404         numa_bitmask_setbit(node_mask, node);
405
406         ret = set_mempolicy(MPOL_BIND, node_mask->maskp, node_mask->size + 1);
407         dprintf("binding to node %d, mask: %016lx => %d\n", node, *node_mask->maskp, ret);
408
409         numa_bitmask_free(node_mask);
410         BUG_ON(ret);
411 }
412
413 #define HPSIZE (2*1024*1024)
414
415 #define set_taskname(fmt...)                            \
416 do {                                                    \
417         char name[20];                                  \
418                                                         \
419         snprintf(name, 20, fmt);                        \
420         prctl(PR_SET_NAME, name);                       \
421 } while (0)
422
423 static u8 *alloc_data(ssize_t bytes0, int map_flags,
424                       int init_zero, int init_cpu0, int thp, int init_random)
425 {
426         cpu_set_t *orig_mask = NULL;
427         ssize_t bytes;
428         u8 *buf;
429         int ret;
430
431         if (!bytes0)
432                 return NULL;
433
434         /* Allocate and initialize all memory on CPU#0: */
435         if (init_cpu0) {
436                 int node = numa_node_of_cpu(0);
437
438                 orig_mask = bind_to_node(node);
439                 bind_to_memnode(node);
440         }
441
442         bytes = bytes0 + HPSIZE;
443
444         buf = (void *)mmap(0, bytes, PROT_READ|PROT_WRITE, MAP_ANON|map_flags, -1, 0);
445         BUG_ON(buf == (void *)-1);
446
447         if (map_flags == MAP_PRIVATE) {
448                 if (thp > 0) {
449                         ret = madvise(buf, bytes, MADV_HUGEPAGE);
450                         if (ret && !g->print_once) {
451                                 g->print_once = 1;
452                                 printf("WARNING: Could not enable THP - do: 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled'\n");
453                         }
454                 }
455                 if (thp < 0) {
456                         ret = madvise(buf, bytes, MADV_NOHUGEPAGE);
457                         if (ret && !g->print_once) {
458                                 g->print_once = 1;
459                                 printf("WARNING: Could not disable THP: run a CONFIG_TRANSPARENT_HUGEPAGE kernel?\n");
460                         }
461                 }
462         }
463
464         if (init_zero) {
465                 bzero(buf, bytes);
466         } else {
467                 /* Initialize random contents, different in each word: */
468                 if (init_random) {
469                         u64 *wbuf = (void *)buf;
470                         long off = rand();
471                         long i;
472
473                         for (i = 0; i < bytes/8; i++)
474                                 wbuf[i] = i + off;
475                 }
476         }
477
478         /* Align to 2MB boundary: */
479         buf = (void *)(((unsigned long)buf + HPSIZE-1) & ~(HPSIZE-1));
480
481         /* Restore affinity: */
482         if (init_cpu0) {
483                 bind_to_cpumask(orig_mask);
484                 CPU_FREE(orig_mask);
485                 mempol_restore();
486         }
487
488         return buf;
489 }
490
491 static void free_data(void *data, ssize_t bytes)
492 {
493         int ret;
494
495         if (!data)
496                 return;
497
498         ret = munmap(data, bytes);
499         BUG_ON(ret);
500 }
501
502 /*
503  * Create a shared memory buffer that can be shared between processes, zeroed:
504  */
505 static void * zalloc_shared_data(ssize_t bytes)
506 {
507         return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0,  g->p.thp, g->p.init_random);
508 }
509
510 /*
511  * Create a shared memory buffer that can be shared between processes:
512  */
513 static void * setup_shared_data(ssize_t bytes)
514 {
515         return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
516 }
517
518 /*
519  * Allocate process-local memory - this will either be shared between
520  * threads of this process, or only be accessed by this thread:
521  */
522 static void * setup_private_data(ssize_t bytes)
523 {
524         return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
525 }
526
527 /*
528  * Initialize a process-shared (global) mutex:
529  */
530 static void init_global_mutex(pthread_mutex_t *mutex)
531 {
532         pthread_mutexattr_t attr;
533
534         pthread_mutexattr_init(&attr);
535         pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
536         pthread_mutex_init(mutex, &attr);
537 }
538
539 /*
540  * Initialize a process-shared (global) condition variable:
541  */
542 static void init_global_cond(pthread_cond_t *cond)
543 {
544         pthread_condattr_t attr;
545
546         pthread_condattr_init(&attr);
547         pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
548         pthread_cond_init(cond, &attr);
549 }
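/*
 * These mutexes and condition variables live inside the MAP_SHARED
 * global_info area (*g is allocated with MAP_SHARED in init()), and are
 * marked PTHREAD_PROCESS_SHARED above, so they synchronize threads across
 * all the fork()ed worker processes, not just within a single process.
 */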
550
551 static int parse_cpu_list(const char *arg)
552 {
553         p0.cpu_list_str = strdup(arg);
554
555         dprintf("got CPU list: {%s}\n", p0.cpu_list_str);
556
557         return 0;
558 }
559
560 static int parse_setup_cpu_list(void)
561 {
562         struct thread_data *td;
563         char *str0, *str;
564         int t;
565
566         if (!g->p.cpu_list_str)
567                 return 0;
568
569         dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
570
571         str0 = str = strdup(g->p.cpu_list_str);
572         t = 0;
573
574         BUG_ON(!str);
575
576         tprintf("# binding tasks to CPUs:\n");
577         tprintf("#  ");
578
579         while (true) {
580                 int bind_cpu, bind_cpu_0, bind_cpu_1;
581                 char *tok, *tok_end, *tok_step, *tok_len, *tok_mul;
582                 int bind_len;
583                 int step;
584                 int mul;
585
586                 tok = strsep(&str, ",");
587                 if (!tok)
588                         break;
589
590                 tok_end = strstr(tok, "-");
591
592                 dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
593                 if (!tok_end) {
594                         /* Single CPU specified: */
595                         bind_cpu_0 = bind_cpu_1 = atol(tok);
596                 } else {
597                         /* CPU range specified (for example: "5-11"): */
598                         bind_cpu_0 = atol(tok);
599                         bind_cpu_1 = atol(tok_end + 1);
600                 }
601
602                 step = 1;
603                 tok_step = strstr(tok, "#");
604                 if (tok_step) {
605                         step = atol(tok_step + 1);
606                         BUG_ON(step <= 0 || step >= g->p.nr_cpus);
607                 }
608
609                 /*
610                  * Mask length.
611                  * Eg: "--cpus 8_4-16#4" means: '--cpus 8_4,12_4,16_4',
612                  * where the _4 means the next 4 CPUs are allowed.
613                  */
614                 bind_len = 1;
615                 tok_len = strstr(tok, "_");
616                 if (tok_len) {
617                         bind_len = atol(tok_len + 1);
618                         BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus);
619                 }
620
621                 /* Multiplier shortcut: "0x8" is shorthand for "0,0,0,0,0,0,0,0" */
622                 mul = 1;
623                 tok_mul = strstr(tok, "x");
624                 if (tok_mul) {
625                         mul = atol(tok_mul + 1);
626                         BUG_ON(mul <= 0);
627                 }
628
629                 dprintf("CPUs: %d_%d-%d#%dx%d\n", bind_cpu_0, bind_len, bind_cpu_1, step, mul);
630
631                 if (bind_cpu_0 >= g->p.nr_cpus || bind_cpu_1 >= g->p.nr_cpus) {
632                         printf("\nTest not applicable, system has only %d CPUs.\n", g->p.nr_cpus);
633                         return -1;
634                 }
635
636                 if (is_cpu_online(bind_cpu_0) != 1 || is_cpu_online(bind_cpu_1) != 1) {
637                         printf("\nTest not applicable, bind_cpu_0 or bind_cpu_1 is offline\n");
638                         return -1;
639                 }
640
641                 BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
642                 BUG_ON(bind_cpu_0 > bind_cpu_1);
643
644                 for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
645                         size_t size = CPU_ALLOC_SIZE(g->p.nr_cpus);
646                         int i;
647
648                         for (i = 0; i < mul; i++) {
649                                 int cpu;
650
651                                 if (t >= g->p.nr_tasks) {
652                                         printf("\n# NOTE: ignoring bind CPUs starting at CPU#%d\n #", bind_cpu);
653                                         goto out;
654                                 }
655                                 td = g->threads + t;
656
657                                 if (t)
658                                         tprintf(",");
659                                 if (bind_len > 1) {
660                                         tprintf("%2d/%d", bind_cpu, bind_len);
661                                 } else {
662                                         tprintf("%2d", bind_cpu);
663                                 }
664
665                                 td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
666                                 BUG_ON(!td->bind_cpumask);
667                                 CPU_ZERO_S(size, td->bind_cpumask);
668                                 for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
669                                         if (cpu < 0 || cpu >= g->p.nr_cpus) {
670                                                 CPU_FREE(td->bind_cpumask);
671                                                 BUG_ON(-1);
672                                         }
673                                         CPU_SET_S(cpu, size, td->bind_cpumask);
674                                 }
675                                 t++;
676                         }
677                 }
678         }
679 out:
680
681         tprintf("\n");
682
683         if (t < g->p.nr_tasks)
684                 printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
685
686         free(str0);
687         return 0;
688 }
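/*
 * Examples of the CPU list grammar parsed above (derived from the token
 * handling of '-', '#', '_' and 'x'):
 *
 *   -C 0,4,8      bind the first three tasks to CPUs 0, 4 and 8
 *   -C 0-15#4     range with a step: CPUs 0, 4, 8 and 12
 *   -C 8_4        CPU 8 with a mask length of 4, i.e. CPUs 8-11 allowed
 *   -C 0x8        multiplier: eight tasks bound to CPU 0
 */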
689
690 static int parse_cpus_opt(const struct option *opt __maybe_unused,
691                           const char *arg, int unset __maybe_unused)
692 {
693         if (!arg)
694                 return -1;
695
696         return parse_cpu_list(arg);
697 }
698
699 static int parse_node_list(const char *arg)
700 {
701         p0.node_list_str = strdup(arg);
702
703         dprintf("got NODE list: {%s}\n", p0.node_list_str);
704
705         return 0;
706 }
707
708 static int parse_setup_node_list(void)
709 {
710         struct thread_data *td;
711         char *str0, *str;
712         int t;
713
714         if (!g->p.node_list_str)
715                 return 0;
716
717         dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
718
719         str0 = str = strdup(g->p.node_list_str);
720         t = 0;
721
722         BUG_ON(!str);
723
724         tprintf("# binding tasks to NODEs:\n");
725         tprintf("# ");
726
727         while (true) {
728                 int bind_node, bind_node_0, bind_node_1;
729                 char *tok, *tok_end, *tok_step, *tok_mul;
730                 int step;
731                 int mul;
732
733                 tok = strsep(&str, ",");
734                 if (!tok)
735                         break;
736
737                 tok_end = strstr(tok, "-");
738
739                 dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
740                 if (!tok_end) {
741                         /* Single NODE specified: */
742                         bind_node_0 = bind_node_1 = atol(tok);
743                 } else {
744                         /* NODE range specified (for example: "5-11"): */
745                         bind_node_0 = atol(tok);
746                         bind_node_1 = atol(tok_end + 1);
747                 }
748
749                 step = 1;
750                 tok_step = strstr(tok, "#");
751                 if (tok_step) {
752                         step = atol(tok_step + 1);
753                         BUG_ON(step <= 0 || step >= g->p.nr_nodes);
754                 }
755
756                 /* Multiplier shortcut: "0x8" is shorthand for "0,0,0,0,0,0,0,0" */
757                 mul = 1;
758                 tok_mul = strstr(tok, "x");
759                 if (tok_mul) {
760                         mul = atol(tok_mul + 1);
761                         BUG_ON(mul <= 0);
762                 }
763
764                 dprintf("NODEs: %d-%d #%d\n", bind_node_0, bind_node_1, step);
765
766                 if (bind_node_0 >= g->p.nr_nodes || bind_node_1 >= g->p.nr_nodes) {
767                         printf("\nTest not applicable, system has only %d nodes.\n", g->p.nr_nodes);
768                         return -1;
769                 }
770
771                 BUG_ON(bind_node_0 < 0 || bind_node_1 < 0);
772                 BUG_ON(bind_node_0 > bind_node_1);
773
774                 for (bind_node = bind_node_0; bind_node <= bind_node_1; bind_node += step) {
775                         int i;
776
777                         for (i = 0; i < mul; i++) {
778                                 if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
779                                         printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
780                                         goto out;
781                                 }
782                                 td = g->threads + t;
783
784                                 if (!t)
785                                         tprintf(" %2d", bind_node);
786                                 else
787                                         tprintf(",%2d", bind_node);
788
789                                 td->bind_node = bind_node;
790                                 t++;
791                         }
792                 }
793         }
794 out:
795
796         tprintf("\n");
797
798         if (t < g->p.nr_tasks)
799                 printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
800
801         free(str0);
802         return 0;
803 }
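/*
 * The -M node list accepts the same grammar minus the '_' mask-length
 * token, for example:
 *
 *   -M 0,1        first two tasks memory-bound to nodes 0 and 1
 *   -M 0-3#2      nodes 0 and 2
 *   -M 0x4        four tasks memory-bound to node 0
 */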
804
805 static int parse_nodes_opt(const struct option *opt __maybe_unused,
806                           const char *arg, int unset __maybe_unused)
807 {
808         if (!arg)
809                 return -1;
810
811         return parse_node_list(arg);
812 }
813
814 static inline uint32_t lfsr_32(uint32_t lfsr)
815 {
816         const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
817         return (lfsr>>1) ^ ((0x0u - (lfsr & 0x1u)) & taps);
818 }
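/*
 * lfsr_32() drives the -r/--data_rand_walk mode in do_work() below: the
 * LFSR state is advanced once per ~1K-word chunk and reduced modulo the
 * word count to pick the chunk's starting offset, giving a cheap
 * pseudo-random walk over the working set, 1024 words at a time.
 */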
819
820 /*
821  * Make sure there's real data dependency to RAM (when read
822  * accesses are enabled), so the compiler, the CPU and the
823  * kernel (KSM, zero page, etc.) cannot optimize away RAM
824  * accesses:
825  */
826 static inline u64 access_data(u64 *data, u64 val)
827 {
828         if (g->p.data_reads)
829                 val += *data;
830         if (g->p.data_writes)
831                 *data = val + 1;
832         return val;
833 }
834
835 /*
836  * The worker process does two types of work: a forward-going
837  * loop and a backward-going loop.
838  *
839  * We do this so that on multiprocessor systems we do not create
840  * a 'train' of processing, with highly synchronized processes,
841  * skewing the whole benchmark.
842  */
843 static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val)
844 {
845         long words = bytes/sizeof(u64);
846         u64 *data = (void *)__data;
847         long chunk_0, chunk_1;
848         u64 *d0, *d, *d1;
849         long off;
850         long i;
851
852         BUG_ON(!data && words);
853         BUG_ON(data && !words);
854
855         if (!data)
856                 return val;
857
858         /* Very simple memset() work variant: */
859         if (g->p.data_zero_memset && !g->p.data_rand_walk) {
860                 bzero(data, bytes);
861                 return val;
862         }
863
864         /* Spread out by PID/TID nr and by loop nr: */
865         chunk_0 = words/nr_max;
866         chunk_1 = words/g->p.nr_loops;
867         off = nr*chunk_0 + loop*chunk_1;
868
869         while (off >= words)
870                 off -= words;
871
872         if (g->p.data_rand_walk) {
873                 u32 lfsr = nr + loop + val;
874                 int j;
875
876                 for (i = 0; i < words/1024; i++) {
877                         long start, end;
878
879                         lfsr = lfsr_32(lfsr);
880
881                         start = lfsr % words;
882                         end = min(start + 1024, words-1);
883
884                         if (g->p.data_zero_memset) {
885                                 bzero(data + start, (end-start) * sizeof(u64));
886                         } else {
887                                 for (j = start; j < end; j++)
888                                         val = access_data(data + j, val);
889                         }
890                 }
891         } else if (!g->p.data_backwards || (nr + loop) & 1) {
892                 /* Process data forwards: */
893
894                 d0 = data + off;
895                 d  = data + off + 1;
896                 d1 = data + words;
897
898                 for (;;) {
899                         if (unlikely(d >= d1))
900                                 d = data;
901                         if (unlikely(d == d0))
902                                 break;
903
904                         val = access_data(d, val);
905
906                         d++;
907                 }
908         } else {
909                 /* Process data backwards: */
910
911                 d0 = data + off;
912                 d  = data + off - 1;
913                 d1 = data + words;
914
915                 for (;;) {
916                         if (unlikely(d < data))
917                                 d = data + words-1;
918                         if (unlikely(d == d0))
919                                 break;
920
921                         val = access_data(d, val);
922
923                         d--;
924                 }
925         }
926
927         return val;
928 }
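/*
 * Offset arithmetic, with illustrative numbers: for words = 1,000,000,
 * nr_max = 8 and nr_loops = 100, thread nr 3 in loop 20 starts at
 * off = 3*(1000000/8) + 20*(1000000/100) = 575000, wrapped modulo the
 * buffer size, so different threads and successive loops start in
 * different regions instead of marching through memory in lock-step.
 */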
929
930 static void update_curr_cpu(int task_nr, unsigned long bytes_worked)
931 {
932         unsigned int cpu;
933
934         cpu = sched_getcpu();
935
936         g->threads[task_nr].curr_cpu = cpu;
937         prctl(0, bytes_worked);
938 }
939
940 /*
941  * Count the number of nodes a process's threads
942  * are spread out on.
943  *
944  * A count of 1 means that the process is compressed
945  * to a single node. A count of g->p.nr_nodes means it's
946  * spread out on the whole system.
947  */
948 static int count_process_nodes(int process_nr)
949 {
950         char *node_present;
951         int nodes;
952         int n, t;
953
954         node_present = (char *)malloc(g->p.nr_nodes * sizeof(char));
955         BUG_ON(!node_present);
956         for (nodes = 0; nodes < g->p.nr_nodes; nodes++)
957                 node_present[nodes] = 0;
958
959         for (t = 0; t < g->p.nr_threads; t++) {
960                 struct thread_data *td;
961                 int task_nr;
962                 int node;
963
964                 task_nr = process_nr*g->p.nr_threads + t;
965                 td = g->threads + task_nr;
966
967                 node = numa_node_of_cpu(td->curr_cpu);
968                 if (node < 0) /* curr_cpu was likely still -1 */ {
969                         free(node_present);
970                         return 0;
971                 }
972
973                 node_present[node] = 1;
974         }
975
976         nodes = 0;
977
978         for (n = 0; n < g->p.nr_nodes; n++)
979                 nodes += node_present[n];
980
981         free(node_present);
982         return nodes;
983 }
984
985 /*
986  * Count the number of distinct processes that have threads running on a node.
987  *
988  * A count of 1 means that the node contains only a single
989  * process. If all nodes on the system contain at most one
990  * process then we are well-converged.
991  */
992 static int count_node_processes(int node)
993 {
994         int processes = 0;
995         int t, p;
996
997         for (p = 0; p < g->p.nr_proc; p++) {
998                 for (t = 0; t < g->p.nr_threads; t++) {
999                         struct thread_data *td;
1000                         int task_nr;
1001                         int n;
1002
1003                         task_nr = p*g->p.nr_threads + t;
1004                         td = g->threads + task_nr;
1005
1006                         n = numa_node_of_cpu(td->curr_cpu);
1007                         if (n == node) {
1008                                 processes++;
1009                                 break;
1010                         }
1011                 }
1012         }
1013
1014         return processes;
1015 }
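/*
 * Putting the two counts together (see calc_convergence() below):
 * convergence is declared once every process's threads have settled on a
 * single node, i.e. count_process_nodes() == 1 for each process; the
 * per-node task/process counts from count_node_processes() are printed
 * alongside to show whether the processes have also separated onto
 * distinct nodes.
 */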
1016
1017 static void calc_convergence_compression(int *strong)
1018 {
1019         unsigned int nodes_min, nodes_max;
1020         int p;
1021
1022         nodes_min = -1;
1023         nodes_max =  0;
1024
1025         for (p = 0; p < g->p.nr_proc; p++) {
1026                 unsigned int nodes = count_process_nodes(p);
1027
1028                 if (!nodes) {
1029                         *strong = 0;
1030                         return;
1031                 }
1032
1033                 nodes_min = min(nodes, nodes_min);
1034                 nodes_max = max(nodes, nodes_max);
1035         }
1036
1037         /* Strong convergence: all threads compress on a single node: */
1038         if (nodes_min == 1 && nodes_max == 1) {
1039                 *strong = 1;
1040         } else {
1041                 *strong = 0;
1042                 tprintf(" {%d-%d}", nodes_min, nodes_max);
1043         }
1044 }
1045
1046 static void calc_convergence(double runtime_ns_max, double *convergence)
1047 {
1048         unsigned int loops_done_min, loops_done_max;
1049         int process_groups;
1050         int *nodes;
1051         int distance;
1052         int nr_min;
1053         int nr_max;
1054         int strong;
1055         int sum;
1056         int nr;
1057         int node;
1058         int cpu;
1059         int t;
1060
1061         if (!g->p.show_convergence && !g->p.measure_convergence)
1062                 return;
1063
1064         nodes = (int *)malloc(g->p.nr_nodes * sizeof(int));
1065         BUG_ON(!nodes);
1066         for (node = 0; node < g->p.nr_nodes; node++)
1067                 nodes[node] = 0;
1068
1069         loops_done_min = -1;
1070         loops_done_max = 0;
1071
1072         for (t = 0; t < g->p.nr_tasks; t++) {
1073                 struct thread_data *td = g->threads + t;
1074                 unsigned int loops_done;
1075
1076                 cpu = td->curr_cpu;
1077
1078                 /* Not all threads have written it yet: */
1079                 if (cpu < 0)
1080                         continue;
1081
1082                 node = numa_node_of_cpu(cpu);
1083
1084                 nodes[node]++;
1085
1086                 loops_done = td->loops_done;
1087                 loops_done_min = min(loops_done, loops_done_min);
1088                 loops_done_max = max(loops_done, loops_done_max);
1089         }
1090
1091         nr_max = 0;
1092         nr_min = g->p.nr_tasks;
1093         sum = 0;
1094
1095         for (node = 0; node < g->p.nr_nodes; node++) {
1096                 if (!is_node_present(node))
1097                         continue;
1098                 nr = nodes[node];
1099                 nr_min = min(nr, nr_min);
1100                 nr_max = max(nr, nr_max);
1101                 sum += nr;
1102         }
1103         BUG_ON(nr_min > nr_max);
1104
1105         BUG_ON(sum > g->p.nr_tasks);
1106
1107         if (0 && (sum < g->p.nr_tasks)) {
1108                 free(nodes);
1109                 return;
1110         }
1111
1112         /*
1113          * Count the number of distinct process groups present
1114          * on nodes - when we are converged this will decrease
1115          * to g->p.nr_proc:
1116          */
1117         process_groups = 0;
1118
1119         for (node = 0; node < g->p.nr_nodes; node++) {
1120                 int processes;
1121
1122                 if (!is_node_present(node))
1123                         continue;
1124                 processes = count_node_processes(node);
1125                 nr = nodes[node];
1126                 tprintf(" %2d/%-2d", nr, processes);
1127
1128                 process_groups += processes;
1129         }
1130
1131         distance = nr_max - nr_min;
1132
1133         tprintf(" [%2d/%-2d]", distance, process_groups);
1134
1135         tprintf(" l:%3d-%-3d (%3d)",
1136                 loops_done_min, loops_done_max, loops_done_max-loops_done_min);
1137
1138         if (loops_done_min && loops_done_max) {
1139                 double skew = 1.0 - (double)loops_done_min/loops_done_max;
1140
1141                 tprintf(" [%4.1f%%]", skew * 100.0);
1142         }
1143
1144         calc_convergence_compression(&strong);
1145
1146         if (strong && process_groups == g->p.nr_proc) {
1147                 if (!*convergence) {
1148                         *convergence = runtime_ns_max;
1149                         tprintf(" (%6.1fs converged)\n", *convergence / NSEC_PER_SEC);
1150                         if (g->p.measure_convergence) {
1151                                 g->all_converged = true;
1152                                 g->stop_work = true;
1153                         }
1154                 }
1155         } else {
1156                 if (*convergence) {
1157                         tprintf(" (%6.1fs de-converged)", runtime_ns_max / NSEC_PER_SEC);
1158                         *convergence = 0;
1159                 }
1160                 tprintf("\n");
1161         }
1162
1163         free(nodes);
1164 }
1165
1166 static void show_summary(double runtime_ns_max, int l, double *convergence)
1167 {
1168         tprintf("\r #  %5.1f%%  [%.1f mins]",
1169                 (double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max / NSEC_PER_SEC / 60.0);
1170
1171         calc_convergence(runtime_ns_max, convergence);
1172
1173         if (g->p.show_details >= 0)
1174                 fflush(stdout);
1175 }
1176
1177 static void *worker_thread(void *__tdata)
1178 {
1179         struct thread_data *td = __tdata;
1180         struct timeval start0, start, stop, diff;
1181         int process_nr = td->process_nr;
1182         int thread_nr = td->thread_nr;
1183         unsigned long last_perturbance;
1184         int task_nr = td->task_nr;
1185         int details = g->p.show_details;
1186         int first_task, last_task;
1187         double convergence = 0;
1188         u64 val = td->val;
1189         double runtime_ns_max;
1190         u8 *global_data;
1191         u8 *process_data;
1192         u8 *thread_data;
1193         u64 bytes_done, secs;
1194         long work_done;
1195         u32 l;
1196         struct rusage rusage;
1197
1198         bind_to_cpumask(td->bind_cpumask);
1199         bind_to_memnode(td->bind_node);
1200
1201         set_taskname("thread %d/%d", process_nr, thread_nr);
1202
1203         global_data = g->data;
1204         process_data = td->process_data;
1205         thread_data = setup_private_data(g->p.bytes_thread);
1206
1207         bytes_done = 0;
1208
1209         last_task = 0;
1210         if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1)
1211                 last_task = 1;
1212
1213         first_task = 0;
1214         if (process_nr == 0 && thread_nr == 0)
1215                 first_task = 1;
1216
1217         if (details >= 2) {
1218                 printf("#  thread %2d / %2d global mem: %p, process mem: %p, thread mem: %p\n",
1219                         process_nr, thread_nr, global_data, process_data, thread_data);
1220         }
1221
1222         if (g->p.serialize_startup) {
1223                 pthread_mutex_lock(&g->startup_mutex);
1224                 g->nr_tasks_started++;
1225                 /* The last thread wakes the main process. */
1226                 if (g->nr_tasks_started == g->p.nr_tasks)
1227                         pthread_cond_signal(&g->startup_cond);
1228
1229                 pthread_mutex_unlock(&g->startup_mutex);
1230
1231                 /* Here we will wait for the main process to start us all at once: */
1232                 pthread_mutex_lock(&g->start_work_mutex);
1233                 g->start_work = false;
1234                 g->nr_tasks_working++;
1235                 while (!g->start_work)
1236                         pthread_cond_wait(&g->start_work_cond, &g->start_work_mutex);
1237
1238                 pthread_mutex_unlock(&g->start_work_mutex);
1239         }
1240
1241         gettimeofday(&start0, NULL);
1242
1243         start = stop = start0;
1244         last_perturbance = start.tv_sec;
1245
1246         for (l = 0; l < g->p.nr_loops; l++) {
1247                 start = stop;
1248
1249                 if (g->stop_work)
1250                         break;
1251
1252                 val += do_work(global_data,  g->p.bytes_global,  process_nr, g->p.nr_proc,      l, val);
1253                 val += do_work(process_data, g->p.bytes_process, thread_nr,  g->p.nr_threads,   l, val);
1254                 val += do_work(thread_data,  g->p.bytes_thread,  0,          1,         l, val);
1255
1256                 if (g->p.sleep_usecs) {
1257                         pthread_mutex_lock(td->process_lock);
1258                         usleep(g->p.sleep_usecs);
1259                         pthread_mutex_unlock(td->process_lock);
1260                 }
1261                 /*
1262                  * Amount of work to be done under a process-global lock:
1263                  */
1264                 if (g->p.bytes_process_locked) {
1265                         pthread_mutex_lock(td->process_lock);
1266                         val += do_work(process_data, g->p.bytes_process_locked, thread_nr,  g->p.nr_threads,    l, val);
1267                         pthread_mutex_unlock(td->process_lock);
1268                 }
1269
1270                 work_done = g->p.bytes_global + g->p.bytes_process +
1271                             g->p.bytes_process_locked + g->p.bytes_thread;
1272
1273                 update_curr_cpu(task_nr, work_done);
1274                 bytes_done += work_done;
1275
1276                 if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs)
1277                         continue;
1278
1279                 td->loops_done = l;
1280
1281                 gettimeofday(&stop, NULL);
1282
1283                 /* Check whether our max runtime timed out: */
1284                 if (g->p.nr_secs) {
1285                         timersub(&stop, &start0, &diff);
1286                         if ((u32)diff.tv_sec >= g->p.nr_secs) {
1287                                 g->stop_work = true;
1288                                 break;
1289                         }
1290                 }
1291
1292                 /* Update the summary at most once per second: */
1293                 if (start.tv_sec == stop.tv_sec)
1294                         continue;
1295
1296                 /*
1297                  * Perturb the first task's equilibrium every g->p.perturb_secs seconds,
1298                  * by migrating to CPU#0:
1299                  */
1300                 if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
1301                         cpu_set_t *orig_mask;
1302                         int target_cpu;
1303                         int this_cpu;
1304
1305                         last_perturbance = stop.tv_sec;
1306
1307                         /*
1308                          * Depending on where we are running, move into
1309                          * the other half of the system, to create some
1310                          * real disturbance:
1311                          */
1312                         this_cpu = g->threads[task_nr].curr_cpu;
1313                         if (this_cpu < g->p.nr_cpus/2)
1314                                 target_cpu = g->p.nr_cpus-1;
1315                         else
1316                                 target_cpu = 0;
1317
1318                         orig_mask = bind_to_cpu(target_cpu);
1319
1320                         /* Here we are running on the target CPU already */
1321                         if (details >= 1)
1322                                 printf(" (injecting perturbance, moved to CPU#%d)\n", target_cpu);
1323
1324                         bind_to_cpumask(orig_mask);
1325                         CPU_FREE(orig_mask);
1326                 }
1327
1328                 if (details >= 3) {
1329                         timersub(&stop, &start, &diff);
1330                         runtime_ns_max = diff.tv_sec * NSEC_PER_SEC;
1331                         runtime_ns_max += diff.tv_usec * NSEC_PER_USEC;
1332
1333                         if (details >= 0) {
1334                                 printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016"PRIx64"]\n",
1335                                         process_nr, thread_nr, runtime_ns_max / bytes_done, val);
1336                         }
1337                         fflush(stdout);
1338                 }
1339                 if (!last_task)
1340                         continue;
1341
1342                 timersub(&stop, &start0, &diff);
1343                 runtime_ns_max = diff.tv_sec * NSEC_PER_SEC;
1344                 runtime_ns_max += diff.tv_usec * NSEC_PER_USEC;
1345
1346                 show_summary(runtime_ns_max, l, &convergence);
1347         }
1348
1349         gettimeofday(&stop, NULL);
1350         timersub(&stop, &start0, &diff);
1351         td->runtime_ns = diff.tv_sec * NSEC_PER_SEC;
1352         td->runtime_ns += diff.tv_usec * NSEC_PER_USEC;
1353         secs = td->runtime_ns / NSEC_PER_SEC;
1354         td->speed_gbs = secs ? bytes_done / secs / 1e9 : 0;
1355
1356         getrusage(RUSAGE_THREAD, &rusage);
1357         td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC;
1358         td->system_time_ns += rusage.ru_stime.tv_usec * NSEC_PER_USEC;
1359         td->user_time_ns = rusage.ru_utime.tv_sec * NSEC_PER_SEC;
1360         td->user_time_ns += rusage.ru_utime.tv_usec * NSEC_PER_USEC;
1361
1362         free_data(thread_data, g->p.bytes_thread);
1363
1364         pthread_mutex_lock(&g->stop_work_mutex);
1365         g->bytes_done += bytes_done;
1366         pthread_mutex_unlock(&g->stop_work_mutex);
1367
1368         return NULL;
1369 }
1370
1371 /*
1372  * A worker process starts g->p.nr_threads worker threads:
1373  */
1374 static void worker_process(int process_nr)
1375 {
1376         pthread_mutex_t process_lock;
1377         struct thread_data *td;
1378         pthread_t *pthreads;
1379         u8 *process_data;
1380         int task_nr;
1381         int ret;
1382         int t;
1383
1384         pthread_mutex_init(&process_lock, NULL);
1385         set_taskname("process %d", process_nr);
1386
1387         /*
1388          * Pick up the memory policy and the CPU binding of our first thread,
1389          * so that we initialize memory accordingly:
1390          */
1391         task_nr = process_nr*g->p.nr_threads;
1392         td = g->threads + task_nr;
1393
1394         bind_to_memnode(td->bind_node);
1395         bind_to_cpumask(td->bind_cpumask);
1396
1397         pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t));
1398         process_data = setup_private_data(g->p.bytes_process);
1399
1400         if (g->p.show_details >= 3) {
1401                 printf(" # process %2d global mem: %p, process mem: %p\n",
1402                         process_nr, g->data, process_data);
1403         }
1404
1405         for (t = 0; t < g->p.nr_threads; t++) {
1406                 task_nr = process_nr*g->p.nr_threads + t;
1407                 td = g->threads + task_nr;
1408
1409                 td->process_data = process_data;
1410                 td->process_nr   = process_nr;
1411                 td->thread_nr    = t;
1412                 td->task_nr      = task_nr;
1413                 td->val          = rand();
1414                 td->curr_cpu     = -1;
1415                 td->process_lock = &process_lock;
1416
1417                 ret = pthread_create(pthreads + t, NULL, worker_thread, td);
1418                 BUG_ON(ret);
1419         }
1420
1421         for (t = 0; t < g->p.nr_threads; t++) {
1422                 ret = pthread_join(pthreads[t], NULL);
1423                 BUG_ON(ret);
1424         }
1425
1426         free_data(process_data, g->p.bytes_process);
1427         free(pthreads);
1428 }
1429
1430 static void print_summary(void)
1431 {
1432         if (g->p.show_details < 0)
1433                 return;
1434
1435         printf("\n ###\n");
1436         printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
1437                 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
1438         printf(" #      %5dx %5ldMB global  shared mem operations\n",
1439                         g->p.nr_loops, g->p.bytes_global/1024/1024);
1440         printf(" #      %5dx %5ldMB process shared mem operations\n",
1441                         g->p.nr_loops, g->p.bytes_process/1024/1024);
1442         printf(" #      %5dx %5ldMB thread  local  mem operations\n",
1443                         g->p.nr_loops, g->p.bytes_thread/1024/1024);
1444
1445         printf(" ###\n");
1446
1447         printf("\n ###\n"); fflush(stdout);
1448 }
1449
1450 static void init_thread_data(void)
1451 {
1452         ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
1453         int t;
1454
1455         g->threads = zalloc_shared_data(size);
1456
1457         for (t = 0; t < g->p.nr_tasks; t++) {
1458                 struct thread_data *td = g->threads + t;
1459                 size_t cpuset_size = CPU_ALLOC_SIZE(g->p.nr_cpus);
1460                 int cpu;
1461
1462                 /* Allow all nodes by default: */
1463                 td->bind_node = NUMA_NO_NODE;
1464
1465                 /* Allow all CPUs by default: */
1466                 td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
1467                 BUG_ON(!td->bind_cpumask);
1468                 CPU_ZERO_S(cpuset_size, td->bind_cpumask);
1469                 for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
1470                         CPU_SET_S(cpu, cpuset_size, td->bind_cpumask);
1471         }
1472 }
1473
1474 static void deinit_thread_data(void)
1475 {
1476         ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
1477         int t;
1478
1479         /* Free the bind_cpumask allocated for thread_data */
1480         for (t = 0; t < g->p.nr_tasks; t++) {
1481                 struct thread_data *td = g->threads + t;
1482                 CPU_FREE(td->bind_cpumask);
1483         }
1484
1485         free_data(g->threads, size);
1486 }
1487
1488 static int init(void)
1489 {
1490         g = (void *)alloc_data(sizeof(*g), MAP_SHARED, 1, 0, 0 /* THP */, 0);
1491
1492         /* Copy over options: */
1493         g->p = p0;
1494
1495         g->p.nr_cpus = numa_num_configured_cpus();
1496
1497         g->p.nr_nodes = numa_max_node() + 1;
1498
1499         /* nr_nodes sizes the node_present char array in count_process_nodes(): */
1500         BUG_ON(g->p.nr_nodes < 0);
1501
1502         if (g->p.show_quiet && !g->p.show_details)
1503                 g->p.show_details = -1;
1504
1505         /* Some memory should be specified: */
1506         if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str)
1507                 return -1;
1508
1509         if (g->p.mb_global_str) {
1510                 g->p.mb_global = atof(g->p.mb_global_str);
1511                 BUG_ON(g->p.mb_global < 0);
1512         }
1513
1514         if (g->p.mb_proc_str) {
1515                 g->p.mb_proc = atof(g->p.mb_proc_str);
1516                 BUG_ON(g->p.mb_proc < 0);
1517         }
1518
1519         if (g->p.mb_proc_locked_str) {
1520                 g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str);
1521                 BUG_ON(g->p.mb_proc_locked < 0);
1522                 BUG_ON(g->p.mb_proc_locked > g->p.mb_proc);
1523         }
1524
1525         if (g->p.mb_thread_str) {
1526                 g->p.mb_thread = atof(g->p.mb_thread_str);
1527                 BUG_ON(g->p.mb_thread < 0);
1528         }
1529
1530         BUG_ON(g->p.nr_threads <= 0);
1531         BUG_ON(g->p.nr_proc <= 0);
1532
1533         g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads;
1534
1535         g->p.bytes_global               = g->p.mb_global        *1024L*1024L;
1536         g->p.bytes_process              = g->p.mb_proc          *1024L*1024L;
1537         g->p.bytes_process_locked       = g->p.mb_proc_locked   *1024L*1024L;
1538         g->p.bytes_thread               = g->p.mb_thread        *1024L*1024L;
1539
1540         g->data = setup_shared_data(g->p.bytes_global);
1541
1542         /* Startup serialization: */
1543         init_global_mutex(&g->start_work_mutex);
1544         init_global_cond(&g->start_work_cond);
1545         init_global_mutex(&g->startup_mutex);
1546         init_global_cond(&g->startup_cond);
1547         init_global_mutex(&g->stop_work_mutex);
1548
1549         init_thread_data();
1550
1551         tprintf("#\n");
1552         if (parse_setup_cpu_list() || parse_setup_node_list())
1553                 return -1;
1554         tprintf("#\n");
1555
1556         print_summary();
1557
1558         return 0;
1559 }
1560
1561 static void deinit(void)
1562 {
1563         free_data(g->data, g->p.bytes_global);
1564         g->data = NULL;
1565
1566         deinit_thread_data();
1567
1568         free_data(g, sizeof(*g));
1569         g = NULL;
1570 }
1571
1572 /*
1573  * Print a short or long result, depending on the verbosity setting:
1574  */
1575 static void print_res(const char *name, double val,
1576                       const char *txt_unit, const char *txt_short, const char *txt_long)
1577 {
1578         if (!name)
1579                 name = "main,";
1580
1581         if (!g->p.show_quiet)
1582                 printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
1583         else
1584                 printf(" %14.3f %s\n", val, txt_long);
1585 }
1586
1587 static int __bench_numa(const char *name)
1588 {
1589         struct timeval start, stop, diff;
1590         u64 runtime_ns_min, runtime_ns_sum;
1591         pid_t *pids, pid, wpid;
1592         double delta_runtime;
1593         double runtime_avg;
1594         double runtime_sec_max;
1595         double runtime_sec_min;
1596         int wait_stat;
1597         double bytes;
1598         int i, t, p;
1599
1600         if (init())
1601                 return -1;
1602
1603         pids = zalloc(g->p.nr_proc * sizeof(*pids));
             BUG_ON(!pids); /* zalloc() can return NULL; guard before pids[i] is written below */
1604         pid = -1;
1605
1606         if (g->p.serialize_startup) {
1607                 tprintf(" #\n");
1608                 tprintf(" # Startup synchronization: ..."); fflush(stdout);
1609         }
1610
1611         gettimeofday(&start, NULL);
1612
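        /*
         * Fork one child per benchmark process; each child runs
         * worker_process() and never returns here (it exit()s when done).
         */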
1613         for (i = 0; i < g->p.nr_proc; i++) {
1614                 pid = fork();
1615                 dprintf(" # process %2d: PID %d\n", i, pid);
1616
1617                 BUG_ON(pid < 0);
1618                 if (!pid) {
1619                         /* Child process: */
1620                         worker_process(i);
1621
1622                         exit(0);
1623                 }
1624                 pids[i] = pid;
1625
1626         }
1627
1628         if (g->p.serialize_startup) {
1629                 bool threads_ready = false;
1630                 double startup_sec;
1631
1632                 /*
1633                  * Wait for all the threads to start up. The last thread will
1634                  * signal this process.
1635                  */
1636                 pthread_mutex_lock(&g->startup_mutex);
1637                 while (g->nr_tasks_started != g->p.nr_tasks)
1638                         pthread_cond_wait(&g->startup_cond, &g->startup_mutex);
1639
1640                 pthread_mutex_unlock(&g->startup_mutex);
1641
1642                 /* Wait for all threads to be at the start_work_cond. */
1643                 while (!threads_ready) {
1644                         pthread_mutex_lock(&g->start_work_mutex);
1645                         threads_ready = (g->nr_tasks_working == g->p.nr_tasks);
1646                         pthread_mutex_unlock(&g->start_work_mutex);
1647                         if (!threads_ready)
1648                                 usleep(1);
1649                 }
1650
1651                 gettimeofday(&stop, NULL);
1652
1653                 timersub(&stop, &start, &diff);
1654
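                /*
                 * Convert the timeval delta to seconds via nanoseconds,
                 * e.g. { .tv_sec = 2, .tv_usec = 500000 } ->
                 * 2*1e9 + 500000*1e3 = 2.5e9 ns -> 2.5 seconds:
                 */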
1655                 startup_sec = diff.tv_sec * NSEC_PER_SEC;
1656                 startup_sec += diff.tv_usec * NSEC_PER_USEC;
1657                 startup_sec /= NSEC_PER_SEC;
1658
1659                 tprintf(" threads initialized in %.6f seconds.\n", startup_sec);
1660                 tprintf(" #\n");
1661
1662                 start = stop;
1663                 /* Start all threads running. */
1664                 pthread_mutex_lock(&g->start_work_mutex);
1665                 g->start_work = true;
1666                 pthread_mutex_unlock(&g->start_work_mutex);
1667                 pthread_cond_broadcast(&g->start_work_cond);
1668         } else {
1669                 gettimeofday(&start, NULL);
1670         }
1671
1672         /* Parent process: */
1673
1674
1675         for (i = 0; i < g->p.nr_proc; i++) {
1676                 wpid = waitpid(pids[i], &wait_stat, 0);
1677                 BUG_ON(wpid < 0);
1678                 BUG_ON(!WIFEXITED(wait_stat));
1679
1680         }
1681
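        /*
         * The workers stored their per-thread results in the shared
         * g->threads[] array, so the parent can read them after waitpid().
         * runtime_ns_min starts at -1LL, i.e. U64_MAX, so the first min()
         * always takes the thread's value.
         */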
1682         runtime_ns_sum = 0;
1683         runtime_ns_min = -1LL;
1684
1685         for (t = 0; t < g->p.nr_tasks; t++) {
1686                 u64 thread_runtime_ns = g->threads[t].runtime_ns;
1687
1688                 runtime_ns_sum += thread_runtime_ns;
1689                 runtime_ns_min = min(thread_runtime_ns, runtime_ns_min);
1690         }
1691
1692         gettimeofday(&stop, NULL);
1693         timersub(&stop, &start, &diff);
1694
1695         BUG_ON(bench_format != BENCH_FORMAT_DEFAULT);
1696
1697         tprintf("\n ###\n");
1698         tprintf("\n");
1699
1700         runtime_sec_max = diff.tv_sec * NSEC_PER_SEC;
1701         runtime_sec_max += diff.tv_usec * NSEC_PER_USEC;
1702         runtime_sec_max /= NSEC_PER_SEC;
1703
1704         runtime_sec_min = (double)runtime_ns_min / NSEC_PER_SEC; /* avoid truncation to whole seconds */
1705
1706         bytes = g->bytes_done;
1707         runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / NSEC_PER_SEC;
1708
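        /*
         * Derived metrics: runtime-max is the wall-clock time of the measured
         * phase, runtime-min/avg come from the per-thread runtimes, and the
         * spread is half the max-min gap as a percentage of the maximum
         * (e.g. max = 10 s, min = 8 s -> delta = 1 s -> 10 % spread).
         * The GB and GB/sec numbers are based on g->bytes_done.
         */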
1709         if (g->p.measure_convergence) {
1710                 print_res(name, runtime_sec_max,
1711                         "secs,", "NUMA-convergence-latency", "secs latency to NUMA-converge");
1712         }
1713
1714         print_res(name, runtime_sec_max,
1715                 "secs,", "runtime-max/thread",  "secs slowest (max) thread-runtime");
1716
1717         print_res(name, runtime_sec_min,
1718                 "secs,", "runtime-min/thread",  "secs fastest (min) thread-runtime");
1719
1720         print_res(name, runtime_avg,
1721                 "secs,", "runtime-avg/thread",  "secs average thread-runtime");
1722
1723         delta_runtime = (runtime_sec_max - runtime_sec_min)/2.0;
1724         print_res(name, delta_runtime / runtime_sec_max * 100.0,
1725                 "%,", "spread-runtime/thread",  "% difference between max/avg runtime");
1726
1727         print_res(name, bytes / g->p.nr_tasks / 1e9,
1728                 "GB,", "data/thread",           "GB data processed, per thread");
1729
1730         print_res(name, bytes / 1e9,
1731                 "GB,", "data-total",            "GB data processed, total");
1732
1733         print_res(name, runtime_sec_max * NSEC_PER_SEC / (bytes / g->p.nr_tasks),
1734                 "nsecs,", "runtime/byte/thread","nsecs/byte/thread runtime");
1735
1736         print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max,
1737                 "GB/sec,", "thread-speed",      "GB/sec/thread speed");
1738
1739         print_res(name, bytes / runtime_sec_max / 1e9,
1740                 "GB/sec,", "total-speed",       "GB/sec total speed");
1741
1742         if (g->p.show_details >= 2) {
1743                 char tname[14 + 2 * 11 + 1];
1744                 struct thread_data *td;
1745                 for (p = 0; p < g->p.nr_proc; p++) {
1746                         for (t = 0; t < g->p.nr_threads; t++) {
1747                                 memset(tname, 0, sizeof(tname));
1748                                 td = g->threads + p*g->p.nr_threads + t;
1749                                 snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
1750                                 print_res(tname, td->speed_gbs,
1751                                         "GB/sec",       "thread-speed", "GB/sec/thread speed");
1752                                 print_res(tname, td->system_time_ns / NSEC_PER_SEC,
1753                                         "secs", "thread-system-time", "system CPU time/thread");
1754                                 print_res(tname, td->user_time_ns / NSEC_PER_SEC,
1755                                         "secs", "thread-user-time", "user CPU time/thread");
1756                         }
1757                 }
1758         }
1759
1760         free(pids);
1761
1762         deinit();
1763
1764         return 0;
1765 }
1766
1767 #define MAX_ARGS 50
1768
1769 static int command_size(const char **argv)
1770 {
1771         int size = 0;
1772
1773         while (*argv) {
1774                 size++;
1775                 argv++;
1776         }
1777
1778         BUG_ON(size >= MAX_ARGS);
1779
1780         return size;
1781 }
1782
1783 static void init_params(struct params *p, const char *name, int argc, const char **argv)
1784 {
1785         int i;
1786
1787         printf("\n # Running %s \"perf bench numa", name);
1788
1789         for (i = 0; i < argc; i++)
1790                 printf(" %s", argv[i]);
1791
1792         printf("\"\n");
1793
1794         memset(p, 0, sizeof(*p));
1795
1796         /* Initialize nonzero defaults: */
1797
1798         p->serialize_startup            = 1;
1799         p->data_reads                   = true;
1800         p->data_writes                  = true;
1801         p->data_backwards               = true;
1802         p->data_rand_walk               = true;
1803         p->nr_loops                     = -1;
1804         p->init_random                  = true;
1805         p->mb_global_str                = "1";
1806         p->nr_proc                      = 1;
1807         p->nr_threads                   = 1;
1808         p->nr_secs                      = 5;
1809         p->run_all                      = argc == 1;
1810 }
1811
1812 static int run_bench_numa(const char *name, const char **argv)
1813 {
1814         int argc = command_size(argv);
1815
1816         init_params(&p0, name, argc, argv);
1817         argc = parse_options(argc, argv, options, bench_numa_usage, 0);
1818         if (argc)
1819                 goto err;
1820
1821         if (__bench_numa(name))
1822                 goto err;
1823
1824         return 0;
1825
1826 err:
1827         return -1;
1828 }
1829
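/*
 * Shared option bundles for the built-in suite below.  Roughly: "-s N" is the
 * runtime limit in seconds, the bundled short options select the access/init
 * pattern plus quiet output (see the options[] table earlier in this file),
 * and "--thp -1" in the *_NOTHP variants disables transparent huge pages for
 * the working set while "--thp 1" asks for them.
 */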
1830 #define OPT_BW_RAM              "-s",  "20", "-zZq",    "--thp", " 1", "--no-data_rand_walk"
1831 #define OPT_BW_RAM_NOTHP        OPT_BW_RAM,             "--thp", "-1"
1832
1833 #define OPT_CONV                "-s", "100", "-zZ0qcm", "--thp", " 1"
1834 #define OPT_CONV_NOTHP          OPT_CONV,               "--thp", "-1"
1835
1836 #define OPT_BW                  "-s",  "20", "-zZ0q",   "--thp", " 1"
1837 #define OPT_BW_NOTHP            OPT_BW,                 "--thp", "-1"
1838
1839 /*
1840  * The built-in test-suite executed by "perf bench numa -a".
1841  *
1842  * (A minimum of 4 nodes and 16 GB of RAM is recommended.)
1843  */
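/*
 * Each row is a result-name followed by the argument vector that bench_all()
 * re-parses; e.g. the first entry is roughly equivalent to running:
 *
 *   perf bench numa mem -p 1 -t 1 -P 1024 -C 0 -M 0 -s 20 -zZq --thp 1 --no-data_rand_walk
 */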
1844 static const char *tests[][MAX_ARGS] = {
1845    /* Basic single-stream NUMA bandwidth measurements: */
1846    { "RAM-bw-local,",     "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
1847                           "-C" ,   "0", "-M",   "0", OPT_BW_RAM },
1848    { "RAM-bw-local-NOTHP,",
1849                           "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
1850                           "-C" ,   "0", "-M",   "0", OPT_BW_RAM_NOTHP },
1851    { "RAM-bw-remote,",    "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
1852                           "-C" ,   "0", "-M",   "1", OPT_BW_RAM },
1853
1854    /* 2-stream NUMA bandwidth measurements: */
1855    { "RAM-bw-local-2x,",  "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
1856                            "-C", "0,2", "-M", "0x2", OPT_BW_RAM },
1857    { "RAM-bw-remote-2x,", "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
1858                            "-C", "0,2", "-M", "1x2", OPT_BW_RAM },
1859
1860    /* Cross-stream NUMA bandwidth measurement: */
1861    { "RAM-bw-cross,",     "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
1862                            "-C", "0,8", "-M", "1,0", OPT_BW_RAM },
1863
1864    /* Convergence latency measurements: */
1865    { " 1x3-convergence,", "mem",  "-p",  "1", "-t",  "3", "-P",  "512", OPT_CONV },
1866    { " 1x4-convergence,", "mem",  "-p",  "1", "-t",  "4", "-P",  "512", OPT_CONV },
1867    { " 1x6-convergence,", "mem",  "-p",  "1", "-t",  "6", "-P", "1020", OPT_CONV },
1868    { " 2x3-convergence,", "mem",  "-p",  "2", "-t",  "3", "-P", "1020", OPT_CONV },
1869    { " 3x3-convergence,", "mem",  "-p",  "3", "-t",  "3", "-P", "1020", OPT_CONV },
1870    { " 4x4-convergence,", "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV },
1871    { " 4x4-convergence-NOTHP,",
1872                           "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV_NOTHP },
1873    { " 4x6-convergence,", "mem",  "-p",  "4", "-t",  "6", "-P", "1020", OPT_CONV },
1874    { " 4x8-convergence,", "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_CONV },
1875    { " 8x4-convergence,", "mem",  "-p",  "8", "-t",  "4", "-P",  "512", OPT_CONV },
1876    { " 8x4-convergence-NOTHP,",
1877                           "mem",  "-p",  "8", "-t",  "4", "-P",  "512", OPT_CONV_NOTHP },
1878    { " 3x1-convergence,", "mem",  "-p",  "3", "-t",  "1", "-P",  "512", OPT_CONV },
1879    { " 4x1-convergence,", "mem",  "-p",  "4", "-t",  "1", "-P",  "512", OPT_CONV },
1880    { " 8x1-convergence,", "mem",  "-p",  "8", "-t",  "1", "-P",  "512", OPT_CONV },
1881    { "16x1-convergence,", "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_CONV },
1882    { "32x1-convergence,", "mem",  "-p", "32", "-t",  "1", "-P",  "128", OPT_CONV },
1883
1884    /* Various NUMA process/thread layout bandwidth measurements: */
1885    { " 2x1-bw-process,",  "mem",  "-p",  "2", "-t",  "1", "-P", "1024", OPT_BW },
1886    { " 3x1-bw-process,",  "mem",  "-p",  "3", "-t",  "1", "-P", "1024", OPT_BW },
1887    { " 4x1-bw-process,",  "mem",  "-p",  "4", "-t",  "1", "-P", "1024", OPT_BW },
1888    { " 8x1-bw-process,",  "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW },
1889    { " 8x1-bw-process-NOTHP,",
1890                           "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW_NOTHP },
1891    { "16x1-bw-process,",  "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_BW },
1892
1893    { " 1x4-bw-thread,",   "mem",  "-p",  "1", "-t",  "4", "-T",  "256", OPT_BW },
1894    { " 1x8-bw-thread,",   "mem",  "-p",  "1", "-t",  "8", "-T",  "256", OPT_BW },
1895    { "1x16-bw-thread,",   "mem",  "-p",  "1", "-t", "16", "-T",  "128", OPT_BW },
1896    { "1x32-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-T",   "64", OPT_BW },
1897
1898    { " 2x3-bw-process,",  "mem",  "-p",  "2", "-t",  "3", "-P",  "512", OPT_BW },
1899    { " 4x4-bw-process,",  "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_BW },
1900    { " 4x6-bw-process,",  "mem",  "-p",  "4", "-t",  "6", "-P",  "512", OPT_BW },
1901    { " 4x8-bw-process,",  "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW },
1902    { " 4x8-bw-process-NOTHP,",
1903                           "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW_NOTHP },
1904    { " 3x3-bw-process,",  "mem",  "-p",  "3", "-t",  "3", "-P",  "512", OPT_BW },
1905    { " 5x5-bw-process,",  "mem",  "-p",  "5", "-t",  "5", "-P",  "512", OPT_BW },
1906
1907    { "2x16-bw-process,",  "mem",  "-p",  "2", "-t", "16", "-P",  "512", OPT_BW },
1908    { "1x32-bw-process,",  "mem",  "-p",  "1", "-t", "32", "-P", "2048", OPT_BW },
1909
1910    { "numa02-bw,",        "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW },
1911    { "numa02-bw-NOTHP,",  "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW_NOTHP },
1912    { "numa01-bw-thread,", "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW },
1913    { "numa01-bw-thread-NOTHP,",
1914                           "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW_NOTHP },
1915 };
1916
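/*
 * Run every entry of the table above.  Note that run_bench_numa()'s return
 * value is ignored, so a failing entry does not abort the rest of the suite.
 */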
1917 static int bench_all(void)
1918 {
1919         int nr = ARRAY_SIZE(tests);
1920         int ret;
1921         int i;
1922
1923         ret = system("echo ' #'; echo ' # Running test on: '$(uname -a); echo ' #'");
1924         BUG_ON(ret < 0);
1925
1926         for (i = 0; i < nr; i++) {
1927                 run_bench_numa(tests[i][0], tests[i] + 1);
1928         }
1929
1930         printf("\n");
1931
1932         return 0;
1933 }
1934
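/*
 * Entry point for 'perf bench numa': parse the options, then either run the
 * built-in suite (run_all, e.g. via -a) or a single __bench_numa() pass.
 */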
1935 int bench_numa(int argc, const char **argv)
1936 {
1937         init_params(&p0, "main,", argc, argv);
1938         argc = parse_options(argc, argv, options, bench_numa_usage, 0);
1939         if (argc)
1940                 goto err;
1941
1942         if (p0.run_all)
1943                 return bench_all();
1944
1945         if (__bench_numa(NULL))
1946                 goto err;
1947
1948         return 0;
1949
1950 err:
1951         usage_with_options(numa_usage, options);
1952         return -1;
1953 }