GNU Linux-libre 4.9.294-gnu1
[releases.git] / tools / perf / util / header.c
1 #include "util.h"
2 #include <sys/types.h>
3 #include <byteswap.h>
4 #include <unistd.h>
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <linux/list.h>
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <sys/utsname.h>
11
12 #include "evlist.h"
13 #include "evsel.h"
14 #include "header.h"
15 #include "../perf.h"
16 #include "trace-event.h"
17 #include "session.h"
18 #include "symbol.h"
19 #include "debug.h"
20 #include "cpumap.h"
21 #include "pmu.h"
22 #include "vdso.h"
23 #include "strbuf.h"
24 #include "build-id.h"
25 #include "data.h"
26 #include <api/fs/fs.h>
27 #include "asm/bug.h"
28
29 /*
30  * magic2 = "PERFILE2"
31  * must be a numerical value so that the endianness
32  * determines the memory layout. That way we are able
33  * to detect the endianness when reading the perf.data file
34  * back.
35  *
36  * We also check for the legacy (PERFFILE) format.
37  */
38 static const char *__perf_magic1 = "PERFFILE";
39 static const u64 __perf_magic2    = 0x32454c4946524550ULL;
40 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
41
42 #define PERF_MAGIC      __perf_magic2
43
44 struct perf_file_attr {
45         struct perf_event_attr  attr;
46         struct perf_file_section        ids;
47 };
48
49 void perf_header__set_feat(struct perf_header *header, int feat)
50 {
51         set_bit(feat, header->adds_features);
52 }
53
54 void perf_header__clear_feat(struct perf_header *header, int feat)
55 {
56         clear_bit(feat, header->adds_features);
57 }
58
59 bool perf_header__has_feat(const struct perf_header *header, int feat)
60 {
61         return test_bit(feat, header->adds_features);
62 }
63
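/*
 * Write the whole buffer, retrying on short writes; returns 0 on
 * success or -errno on failure.
 */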
64 static int do_write(int fd, const void *buf, size_t size)
65 {
66         while (size) {
67                 int ret = write(fd, buf, size);
68
69                 if (ret < 0)
70                         return -errno;
71
72                 size -= ret;
73                 buf += ret;
74         }
75
76         return 0;
77 }
78
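/*
 * Write 'count' bytes of 'bf' followed by enough zero bytes to pad the
 * record out to 'count_aligned' bytes.
 */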
79 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
80 {
81         static const char zero_buf[NAME_ALIGN];
82         int err = do_write(fd, bf, count);
83
84         if (!err)
85                 err = do_write(fd, zero_buf, count_aligned - count);
86
87         return err;
88 }
89
90 #define string_size(str)                                                \
91         (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
92
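/*
 * On-disk string layout: a u32 length (NUL terminator and padding
 * included) followed by the string itself, zero-padded to NAME_ALIGN.
 */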
93 static int do_write_string(int fd, const char *str)
94 {
95         u32 len, olen;
96         int ret;
97
98         olen = strlen(str) + 1;
99         len = PERF_ALIGN(olen, NAME_ALIGN);
100
101         /* write len, incl. \0 */
102         ret = do_write(fd, &len, sizeof(len));
103         if (ret < 0)
104                 return ret;
105
106         return write_padded(fd, str, olen, len);
107 }
108
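/*
 * Counterpart of do_write_string(): read the u32 length (byte-swapping
 * if needed), then the padded string. Returns a malloc'd buffer or NULL
 * on error.
 */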
109 static char *do_read_string(int fd, struct perf_header *ph)
110 {
111         ssize_t sz, ret;
112         u32 len;
113         char *buf;
114
115         sz = readn(fd, &len, sizeof(len));
116         if (sz < (ssize_t)sizeof(len))
117                 return NULL;
118
119         if (ph->needs_swap)
120                 len = bswap_32(len);
121
122         buf = malloc(len);
123         if (!buf)
124                 return NULL;
125
126         ret = readn(fd, buf, len);
127         if (ret == (ssize_t)len) {
128                 /*
129                  * strings are padded by zeroes
130                  * thus the actual strlen of buf
131                  * may be less than len
132                  */
133                 return buf;
134         }
135
136         free(buf);
137         return NULL;
138 }
139
140 static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
141                             struct perf_evlist *evlist)
142 {
143         return read_tracing_data(fd, &evlist->entries);
144 }
145
146
147 static int write_build_id(int fd, struct perf_header *h,
148                           struct perf_evlist *evlist __maybe_unused)
149 {
150         struct perf_session *session;
151         int err;
152
153         session = container_of(h, struct perf_session, header);
154
155         if (!perf_session__read_build_ids(session, true))
156                 return -1;
157
158         err = perf_session__write_buildid_table(session, fd);
159         if (err < 0) {
160                 pr_debug("failed to write buildid table\n");
161                 return err;
162         }
163         perf_session__cache_build_ids(session);
164
165         return 0;
166 }
167
168 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
169                           struct perf_evlist *evlist __maybe_unused)
170 {
171         struct utsname uts;
172         int ret;
173
174         ret = uname(&uts);
175         if (ret < 0)
176                 return -1;
177
178         return do_write_string(fd, uts.nodename);
179 }
180
181 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
182                            struct perf_evlist *evlist __maybe_unused)
183 {
184         struct utsname uts;
185         int ret;
186
187         ret = uname(&uts);
188         if (ret < 0)
189                 return -1;
190
191         return do_write_string(fd, uts.release);
192 }
193
194 static int write_arch(int fd, struct perf_header *h __maybe_unused,
195                       struct perf_evlist *evlist __maybe_unused)
196 {
197         struct utsname uts;
198         int ret;
199
200         ret = uname(&uts);
201         if (ret < 0)
202                 return -1;
203
204         return do_write_string(fd, uts.machine);
205 }
206
207 static int write_version(int fd, struct perf_header *h __maybe_unused,
208                          struct perf_evlist *evlist __maybe_unused)
209 {
210         return do_write_string(fd, perf_version_string);
211 }
212
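/*
 * Find the first /proc/cpuinfo line that starts with 'cpuinfo_proc',
 * strip the field name and squash repeated whitespace, then write the
 * remaining CPU description string.
 */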
213 static int __write_cpudesc(int fd, const char *cpuinfo_proc)
214 {
215         FILE *file;
216         char *buf = NULL;
217         char *s, *p;
218         const char *search = cpuinfo_proc;
219         size_t len = 0;
220         int ret = -1;
221
222         if (!search)
223                 return -1;
224
225         file = fopen("/proc/cpuinfo", "r");
226         if (!file)
227                 return -1;
228
229         while (getline(&buf, &len, file) > 0) {
230                 ret = strncmp(buf, search, strlen(search));
231                 if (!ret)
232                         break;
233         }
234
235         if (ret) {
236                 ret = -1;
237                 goto done;
238         }
239
240         s = buf;
241
242         p = strchr(buf, ':');
243         if (p && *(p+1) == ' ' && *(p+2))
244                 s = p + 2;
245         p = strchr(s, '\n');
246         if (p)
247                 *p = '\0';
248
249         /* squash extra space characters (branding string) */
250         p = s;
251         while (*p) {
252                 if (isspace(*p)) {
253                         char *r = p + 1;
254                         char *q = r;
255                         *p = ' ';
256                         while (*q && isspace(*q))
257                                 q++;
258                         if (q != (p+1))
259                                 while ((*r++ = *q++));
260                 }
261                 p++;
262         }
263         ret = do_write_string(fd, s);
264 done:
265         free(buf);
266         fclose(file);
267         return ret;
268 }
269
270 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
271                        struct perf_evlist *evlist __maybe_unused)
272 {
273 #ifndef CPUINFO_PROC
274 #define CPUINFO_PROC {"model name", }
275 #endif
276         const char *cpuinfo_procs[] = CPUINFO_PROC;
277         unsigned int i;
278
279         for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
280                 int ret;
281                 ret = __write_cpudesc(fd, cpuinfo_procs[i]);
282                 if (ret >= 0)
283                         return ret;
284         }
285         return -1;
286 }
287
288
289 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
290                         struct perf_evlist *evlist __maybe_unused)
291 {
292         long nr;
293         u32 nrc, nra;
294         int ret;
295
296         nr = sysconf(_SC_NPROCESSORS_CONF);
297         if (nr < 0)
298                 return -1;
299
300         nrc = (u32)(nr & UINT_MAX);
301
302         nr = sysconf(_SC_NPROCESSORS_ONLN);
303         if (nr < 0)
304                 return -1;
305
306         nra = (u32)(nr & UINT_MAX);
307
308         ret = do_write(fd, &nrc, sizeof(nrc));
309         if (ret < 0)
310                 return ret;
311
312         return do_write(fd, &nra, sizeof(nra));
313 }
314
315 static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
316                             struct perf_evlist *evlist)
317 {
318         struct perf_evsel *evsel;
319         u32 nre, nri, sz;
320         int ret;
321
322         nre = evlist->nr_entries;
323
324         /*
325          * write number of events
326          */
327         ret = do_write(fd, &nre, sizeof(nre));
328         if (ret < 0)
329                 return ret;
330
331         /*
332          * size of perf_event_attr struct
333          */
334         sz = (u32)sizeof(evsel->attr);
335         ret = do_write(fd, &sz, sizeof(sz));
336         if (ret < 0)
337                 return ret;
338
339         evlist__for_each_entry(evlist, evsel) {
340                 ret = do_write(fd, &evsel->attr, sz);
341                 if (ret < 0)
342                         return ret;
343                 /*
344                  * write the number of unique ids per event;
345                  * there is one id per instance of an event
346                  *
347                  * copy into nri to be independent of the
348                  * type of the ids
349                  */
350                 nri = evsel->ids;
351                 ret = do_write(fd, &nri, sizeof(nri));
352                 if (ret < 0)
353                         return ret;
354
355                 /*
356                  * write event string as passed on cmdline
357                  */
358                 ret = do_write_string(fd, perf_evsel__name(evsel));
359                 if (ret < 0)
360                         return ret;
361                 /*
362                  * write unique ids for this event
363                  */
364                 ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
365                 if (ret < 0)
366                         return ret;
367         }
368         return 0;
369 }
370
371 static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
372                          struct perf_evlist *evlist __maybe_unused)
373 {
374         char buf[MAXPATHLEN];
375         char proc[32];
376         u32 n;
377         int i, ret;
378
379         /*
380          * actual path to the perf binary
381          */
382         sprintf(proc, "/proc/%d/exe", getpid());
383         ret = readlink(proc, buf, sizeof(buf));
384         if (ret <= 0)
385                 return -1;
386
387         /* readlink() does not add null termination */
388         buf[ret] = '\0';
389
390         /* account for binary path */
391         n = perf_env.nr_cmdline + 1;
392
393         ret = do_write(fd, &n, sizeof(n));
394         if (ret < 0)
395                 return ret;
396
397         ret = do_write_string(fd, buf);
398         if (ret < 0)
399                 return ret;
400
401         for (i = 0 ; i < perf_env.nr_cmdline; i++) {
402                 ret = do_write_string(fd, perf_env.cmdline_argv[i]);
403                 if (ret < 0)
404                         return ret;
405         }
406         return 0;
407 }
408
409 #define CORE_SIB_FMT \
410         "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
411 #define THRD_SIB_FMT \
412         "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
413
414 struct cpu_topo {
415         u32 cpu_nr;
416         u32 core_sib;
417         u32 thread_sib;
418         char **core_siblings;
419         char **thread_siblings;
420 };
421
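/*
 * Read the core and thread sibling lists of 'cpu' from sysfs and add
 * each one to 'tp' unless an identical list has already been recorded
 * for a previous CPU.
 */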
422 static int build_cpu_topo(struct cpu_topo *tp, int cpu)
423 {
424         FILE *fp;
425         char filename[MAXPATHLEN];
426         char *buf = NULL, *p;
427         size_t len = 0;
428         ssize_t sret;
429         u32 i = 0;
430         int ret = -1;
431
432         sprintf(filename, CORE_SIB_FMT, cpu);
433         fp = fopen(filename, "r");
434         if (!fp)
435                 goto try_threads;
436
437         sret = getline(&buf, &len, fp);
438         fclose(fp);
439         if (sret <= 0)
440                 goto try_threads;
441
442         p = strchr(buf, '\n');
443         if (p)
444                 *p = '\0';
445
446         for (i = 0; i < tp->core_sib; i++) {
447                 if (!strcmp(buf, tp->core_siblings[i]))
448                         break;
449         }
450         if (i == tp->core_sib) {
451                 tp->core_siblings[i] = buf;
452                 tp->core_sib++;
453                 buf = NULL;
454                 len = 0;
455         }
456         ret = 0;
457
458 try_threads:
459         sprintf(filename, THRD_SIB_FMT, cpu);
460         fp = fopen(filename, "r");
461         if (!fp)
462                 goto done;
463
464         if (getline(&buf, &len, fp) <= 0)
465                 goto done;
466
467         p = strchr(buf, '\n');
468         if (p)
469                 *p = '\0';
470
471         for (i = 0; i < tp->thread_sib; i++) {
472                 if (!strcmp(buf, tp->thread_siblings[i]))
473                         break;
474         }
475         if (i == tp->thread_sib) {
476                 tp->thread_siblings[i] = buf;
477                 tp->thread_sib++;
478                 buf = NULL;
479         }
480         ret = 0;
481 done:
482         if (fp)
483                 fclose(fp);
484         free(buf);
485         return ret;
486 }
487
488 static void free_cpu_topo(struct cpu_topo *tp)
489 {
490         u32 i;
491
492         if (!tp)
493                 return;
494
495         for (i = 0 ; i < tp->core_sib; i++)
496                 zfree(&tp->core_siblings[i]);
497
498         for (i = 0 ; i < tp->thread_sib; i++)
499                 zfree(&tp->thread_siblings[i]);
500
501         free(tp);
502 }
503
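/*
 * struct cpu_topo and both sibling string arrays are allocated in one
 * calloc() block and carved up below, so free_cpu_topo() only needs to
 * free the individual strings and the block itself.
 */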
504 static struct cpu_topo *build_cpu_topology(void)
505 {
506         struct cpu_topo *tp;
507         void *addr;
508         u32 nr, i;
509         size_t sz;
510         long ncpus;
511         int ret = -1;
512
513         ncpus = sysconf(_SC_NPROCESSORS_CONF);
514         if (ncpus < 0)
515                 return NULL;
516
517         nr = (u32)(ncpus & UINT_MAX);
518
519         sz = nr * sizeof(char *);
520
521         addr = calloc(1, sizeof(*tp) + 2 * sz);
522         if (!addr)
523                 return NULL;
524
525         tp = addr;
526         tp->cpu_nr = nr;
527         addr += sizeof(*tp);
528         tp->core_siblings = addr;
529         addr += sz;
530         tp->thread_siblings = addr;
531
532         for (i = 0; i < nr; i++) {
533                 ret = build_cpu_topo(tp, i);
534                 if (ret < 0)
535                         break;
536         }
537         if (ret) {
538                 free_cpu_topo(tp);
539                 tp = NULL;
540         }
541         return tp;
542 }
543
544 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
545                           struct perf_evlist *evlist __maybe_unused)
546 {
547         struct cpu_topo *tp;
548         u32 i;
549         int ret, j;
550
551         tp = build_cpu_topology();
552         if (!tp)
553                 return -1;
554
555         ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
556         if (ret < 0)
557                 goto done;
558
559         for (i = 0; i < tp->core_sib; i++) {
560                 ret = do_write_string(fd, tp->core_siblings[i]);
561                 if (ret < 0)
562                         goto done;
563         }
564         ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
565         if (ret < 0)
566                 goto done;
567
568         for (i = 0; i < tp->thread_sib; i++) {
569                 ret = do_write_string(fd, tp->thread_siblings[i]);
570                 if (ret < 0)
571                         break;
572         }
573
574         ret = perf_env__read_cpu_topology_map(&perf_env);
575         if (ret < 0)
576                 goto done;
577
578         for (j = 0; j < perf_env.nr_cpus_avail; j++) {
579                 ret = do_write(fd, &perf_env.cpu[j].core_id,
580                                sizeof(perf_env.cpu[j].core_id));
581                 if (ret < 0)
582                         return ret;
583                 ret = do_write(fd, &perf_env.cpu[j].socket_id,
584                                sizeof(perf_env.cpu[j].socket_id));
585                 if (ret < 0)
586                         return ret;
587         }
588 done:
589         free_cpu_topo(tp);
590         return ret;
591 }
592
593
594
595 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
596                           struct perf_evlist *evlist __maybe_unused)
597 {
598         char *buf = NULL;
599         FILE *fp;
600         size_t len = 0;
601         int ret = -1, n;
602         uint64_t mem;
603
604         fp = fopen("/proc/meminfo", "r");
605         if (!fp)
606                 return -1;
607
608         while (getline(&buf, &len, fp) > 0) {
609                 ret = strncmp(buf, "MemTotal:", 9);
610                 if (!ret)
611                         break;
612         }
613         if (!ret) {
614                 n = sscanf(buf, "%*s %"PRIu64, &mem);
615                 if (n == 1)
616                         ret = do_write(fd, &mem, sizeof(mem));
617         } else
618                 ret = -1;
619         free(buf);
620         fclose(fp);
621         return ret;
622 }
623
624 static int write_topo_node(int fd, int node)
625 {
626         char str[MAXPATHLEN];
627         char field[32];
628         char *buf = NULL, *p;
629         size_t len = 0;
630         FILE *fp;
631         u64 mem_total, mem_free, mem;
632         int ret = -1;
633
634         sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
635         fp = fopen(str, "r");
636         if (!fp)
637                 return -1;
638
639         while (getline(&buf, &len, fp) > 0) {
640                 /* skip over invalid lines */
641                 if (!strchr(buf, ':'))
642                         continue;
643                 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
644                         goto done;
645                 if (!strcmp(field, "MemTotal:"))
646                         mem_total = mem;
647                 if (!strcmp(field, "MemFree:"))
648                         mem_free = mem;
649         }
650
651         fclose(fp);
652         fp = NULL;
653
654         ret = do_write(fd, &mem_total, sizeof(u64));
655         if (ret)
656                 goto done;
657
658         ret = do_write(fd, &mem_free, sizeof(u64));
659         if (ret)
660                 goto done;
661
662         ret = -1;
663         sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
664
665         fp = fopen(str, "r");
666         if (!fp)
667                 goto done;
668
669         if (getline(&buf, &len, fp) <= 0)
670                 goto done;
671
672         p = strchr(buf, '\n');
673         if (p)
674                 *p = '\0';
675
676         ret = do_write_string(fd, buf);
677 done:
678         free(buf);
679         if (fp)
680                 fclose(fp);
681         return ret;
682 }
683
684 static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
685                           struct perf_evlist *evlist __maybe_unused)
686 {
687         char *buf = NULL;
688         size_t len = 0;
689         FILE *fp;
690         struct cpu_map *node_map = NULL;
691         char *c;
692         u32 nr, i, j;
693         int ret = -1;
694
695         fp = fopen("/sys/devices/system/node/online", "r");
696         if (!fp)
697                 return -1;
698
699         if (getline(&buf, &len, fp) <= 0)
700                 goto done;
701
702         c = strchr(buf, '\n');
703         if (c)
704                 *c = '\0';
705
706         node_map = cpu_map__new(buf);
707         if (!node_map)
708                 goto done;
709
710         nr = (u32)node_map->nr;
711
712         ret = do_write(fd, &nr, sizeof(nr));
713         if (ret < 0)
714                 goto done;
715
716         for (i = 0; i < nr; i++) {
717                 j = (u32)node_map->map[i];
718                 ret = do_write(fd, &j, sizeof(j));
719                 if (ret < 0)
720                         break;
721
722                 ret = write_topo_node(fd, i);
723                 if (ret < 0)
724                         break;
725         }
726 done:
727         free(buf);
728         fclose(fp);
729         cpu_map__put(node_map);
730         return ret;
731 }
732
733 /*
734  * File format:
735  *
736  * struct pmu_mappings {
737  *      u32     pmu_num;
738  *      struct pmu_map {
739  *              u32     type;
740  *              char    name[];
741  *      }[pmu_num];
742  * };
743  */
744
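/*
 * pmu_num is not known up front, so a zero placeholder is written first
 * and the real count is patched in with pwrite() once all PMUs have
 * been emitted.
 */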
745 static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
746                               struct perf_evlist *evlist __maybe_unused)
747 {
748         struct perf_pmu *pmu = NULL;
749         off_t offset = lseek(fd, 0, SEEK_CUR);
750         __u32 pmu_num = 0;
751         int ret;
752
753         /* write real pmu_num later */
754         ret = do_write(fd, &pmu_num, sizeof(pmu_num));
755         if (ret < 0)
756                 return ret;
757
758         while ((pmu = perf_pmu__scan(pmu))) {
759                 if (!pmu->name)
760                         continue;
761                 pmu_num++;
762
763                 ret = do_write(fd, &pmu->type, sizeof(pmu->type));
764                 if (ret < 0)
765                         return ret;
766
767                 ret = do_write_string(fd, pmu->name);
768                 if (ret < 0)
769                         return ret;
770         }
771
772         if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
773                 /* discard all */
774                 lseek(fd, offset, SEEK_SET);
775                 return -1;
776         }
777
778         return 0;
779 }
780
781 /*
782  * File format:
783  *
784  * struct group_descs {
785  *      u32     nr_groups;
786  *      struct group_desc {
787  *              char    name[];
788  *              u32     leader_idx;
789  *              u32     nr_members;
790  *      }[nr_groups];
791  * };
792  */
793 static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
794                             struct perf_evlist *evlist)
795 {
796         u32 nr_groups = evlist->nr_groups;
797         struct perf_evsel *evsel;
798         int ret;
799
800         ret = do_write(fd, &nr_groups, sizeof(nr_groups));
801         if (ret < 0)
802                 return ret;
803
804         evlist__for_each_entry(evlist, evsel) {
805                 if (perf_evsel__is_group_leader(evsel) &&
806                     evsel->nr_members > 1) {
807                         const char *name = evsel->group_name ?: "{anon_group}";
808                         u32 leader_idx = evsel->idx;
809                         u32 nr_members = evsel->nr_members;
810
811                         ret = do_write_string(fd, name);
812                         if (ret < 0)
813                                 return ret;
814
815                         ret = do_write(fd, &leader_idx, sizeof(leader_idx));
816                         if (ret < 0)
817                                 return ret;
818
819                         ret = do_write(fd, &nr_members, sizeof(nr_members));
820                         if (ret < 0)
821                                 return ret;
822                 }
823         }
824         return 0;
825 }
826
827 /*
828  * default get_cpuid(): nothing gets recorded
829  * actual implementation must be in arch/$(SRCARCH)/util/header.c
830  */
831 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
832 {
833         return -1;
834 }
835
836 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
837                        struct perf_evlist *evlist __maybe_unused)
838 {
839         char buffer[64];
840         int ret;
841
842         ret = get_cpuid(buffer, sizeof(buffer));
843         if (!ret)
844                 goto write_it;
845
846         return -1;
847 write_it:
848         return do_write_string(fd, buffer);
849 }
850
851 static int write_branch_stack(int fd __maybe_unused,
852                               struct perf_header *h __maybe_unused,
853                        struct perf_evlist *evlist __maybe_unused)
854 {
855         return 0;
856 }
857
858 static int write_auxtrace(int fd, struct perf_header *h,
859                           struct perf_evlist *evlist __maybe_unused)
860 {
861         struct perf_session *session;
862         int err;
863
864         session = container_of(h, struct perf_session, header);
865
866         err = auxtrace_index__write(fd, &session->auxtrace_index);
867         if (err < 0)
868                 pr_err("Failed to write auxtrace index\n");
869         return err;
870 }
871
872 static int cpu_cache_level__sort(const void *a, const void *b)
873 {
874         struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
875         struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
876
877         return cache_a->level - cache_b->level;
878 }
879
880 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
881 {
882         if (a->level != b->level)
883                 return false;
884
885         if (a->line_size != b->line_size)
886                 return false;
887
888         if (a->sets != b->sets)
889                 return false;
890
891         if (a->ways != b->ways)
892                 return false;
893
894         if (strcmp(a->type, b->type))
895                 return false;
896
897         if (strcmp(a->size, b->size))
898                 return false;
899
900         if (strcmp(a->map, b->map))
901                 return false;
902
903         return true;
904 }
905
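/*
 * Fill 'cache' from /sys/devices/system/cpu/cpu<cpu>/cache/index<level>/.
 * Returns 0 on success, 1 if that cache index does not exist and -1 on
 * read error.
 */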
906 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
907 {
908         char path[PATH_MAX], file[PATH_MAX];
909         struct stat st;
910         size_t len;
911
912         scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
913         scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
914
915         if (stat(file, &st))
916                 return 1;
917
918         scnprintf(file, PATH_MAX, "%s/level", path);
919         if (sysfs__read_int(file, (int *) &cache->level))
920                 return -1;
921
922         scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
923         if (sysfs__read_int(file, (int *) &cache->line_size))
924                 return -1;
925
926         scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
927         if (sysfs__read_int(file, (int *) &cache->sets))
928                 return -1;
929
930         scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
931         if (sysfs__read_int(file, (int *) &cache->ways))
932                 return -1;
933
934         scnprintf(file, PATH_MAX, "%s/type", path);
935         if (sysfs__read_str(file, &cache->type, &len))
936                 return -1;
937
938         cache->type[len] = 0;
939         cache->type = rtrim(cache->type);
940
941         scnprintf(file, PATH_MAX, "%s/size", path);
942         if (sysfs__read_str(file, &cache->size, &len)) {
943                 free(cache->type);
944                 return -1;
945         }
946
947         cache->size[len] = 0;
948         cache->size = rtrim(cache->size);
949
950         scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
951         if (sysfs__read_str(file, &cache->map, &len)) {
952                 free(cache->size);
953                 free(cache->type);
954                 return -1;
955         }
956
957         cache->map[len] = 0;
958         cache->map = rtrim(cache->map);
959         return 0;
960 }
961
962 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
963 {
964         fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
965 }
966
967 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
968 {
969         u32 i, cnt = 0;
970         long ncpus;
971         u32 nr, cpu;
972         u16 level;
973
974         ncpus = sysconf(_SC_NPROCESSORS_CONF);
975         if (ncpus < 0)
976                 return -1;
977
978         nr = (u32)(ncpus & UINT_MAX);
979
980         for (cpu = 0; cpu < nr; cpu++) {
981                 for (level = 0; level < 10; level++) {
982                         struct cpu_cache_level c;
983                         int err;
984
985                         err = cpu_cache_level__read(&c, cpu, level);
986                         if (err < 0)
987                                 return err;
988
989                         if (err == 1)
990                                 break;
991
992                         for (i = 0; i < cnt; i++) {
993                                 if (cpu_cache_level__cmp(&c, &caches[i]))
994                                         break;
995                         }
996
997                         if (i == cnt)
998                                 caches[cnt++] = c;
999                         else
1000                                 cpu_cache_level__free(&c);
1001
1002                         if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
1003                                 goto out;
1004                 }
1005         }
1006  out:
1007         *cntp = cnt;
1008         return 0;
1009 }
1010
1011 #define MAX_CACHES (MAX_NR_CPUS * 4)
1012
1013 static int write_cache(int fd, struct perf_header *h __maybe_unused,
1014                           struct perf_evlist *evlist __maybe_unused)
1015 {
1016         struct cpu_cache_level caches[MAX_CACHES];
1017         u32 cnt = 0, i, version = 1;
1018         int ret;
1019
1020         ret = build_caches(caches, MAX_CACHES, &cnt);
1021         if (ret)
1022                 goto out;
1023
1024         qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1025
1026         ret = do_write(fd, &version, sizeof(u32));
1027         if (ret < 0)
1028                 goto out;
1029
1030         ret = do_write(fd, &cnt, sizeof(u32));
1031         if (ret < 0)
1032                 goto out;
1033
1034         for (i = 0; i < cnt; i++) {
1035                 struct cpu_cache_level *c = &caches[i];
1036
1037                 #define _W(v)                                   \
1038                         ret = do_write(fd, &c->v, sizeof(u32)); \
1039                         if (ret < 0)                            \
1040                                 goto out;
1041
1042                 _W(level)
1043                 _W(line_size)
1044                 _W(sets)
1045                 _W(ways)
1046                 #undef _W
1047
1048                 #define _W(v)                                           \
1049                         ret = do_write_string(fd, (const char *) c->v); \
1050                         if (ret < 0)                                    \
1051                                 goto out;
1052
1053                 _W(type)
1054                 _W(size)
1055                 _W(map)
1056                 #undef _W
1057         }
1058
1059 out:
1060         for (i = 0; i < cnt; i++)
1061                 cpu_cache_level__free(&caches[i]);
1062         return ret;
1063 }
1064
1065 static int write_stat(int fd __maybe_unused,
1066                       struct perf_header *h __maybe_unused,
1067                       struct perf_evlist *evlist __maybe_unused)
1068 {
1069         return 0;
1070 }
1071
1072 static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
1073                            FILE *fp)
1074 {
1075         fprintf(fp, "# hostname : %s\n", ph->env.hostname);
1076 }
1077
1078 static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
1079                             FILE *fp)
1080 {
1081         fprintf(fp, "# os release : %s\n", ph->env.os_release);
1082 }
1083
1084 static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1085 {
1086         fprintf(fp, "# arch : %s\n", ph->env.arch);
1087 }
1088
1089 static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
1090                           FILE *fp)
1091 {
1092         fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
1093 }
1094
1095 static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
1096                          FILE *fp)
1097 {
1098         fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
1099         fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
1100 }
1101
1102 static void print_version(struct perf_header *ph, int fd __maybe_unused,
1103                           FILE *fp)
1104 {
1105         fprintf(fp, "# perf version : %s\n", ph->env.version);
1106 }
1107
1108 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
1109                           FILE *fp)
1110 {
1111         int nr, i;
1112
1113         nr = ph->env.nr_cmdline;
1114
1115         fprintf(fp, "# cmdline : ");
1116
1117         for (i = 0; i < nr; i++)
1118                 fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
1119         fputc('\n', fp);
1120 }
1121
1122 static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
1123                                FILE *fp)
1124 {
1125         int nr, i;
1126         char *str;
1127         int cpu_nr = ph->env.nr_cpus_online;
1128
1129         nr = ph->env.nr_sibling_cores;
1130         str = ph->env.sibling_cores;
1131
1132         for (i = 0; i < nr; i++) {
1133                 fprintf(fp, "# sibling cores   : %s\n", str);
1134                 str += strlen(str) + 1;
1135         }
1136
1137         nr = ph->env.nr_sibling_threads;
1138         str = ph->env.sibling_threads;
1139
1140         for (i = 0; i < nr; i++) {
1141                 fprintf(fp, "# sibling threads : %s\n", str);
1142                 str += strlen(str) + 1;
1143         }
1144
1145         if (ph->env.cpu != NULL) {
1146                 for (i = 0; i < cpu_nr; i++)
1147                         fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
1148                                 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
1149         } else
1150                 fprintf(fp, "# Core ID and Socket ID information is not available\n");
1151 }
1152
1153 static void free_event_desc(struct perf_evsel *events)
1154 {
1155         struct perf_evsel *evsel;
1156
1157         if (!events)
1158                 return;
1159
1160         for (evsel = events; evsel->attr.size; evsel++) {
1161                 zfree(&evsel->name);
1162                 zfree(&evsel->id);
1163         }
1164
1165         free(events);
1166 }
1167
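/*
 * Parse the event description section: the number of events, the
 * on-file attr size and, for each event, its attr, id count, name and
 * ids. The returned array is terminated by an entry with attr.size == 0.
 */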
1168 static struct perf_evsel *
1169 read_event_desc(struct perf_header *ph, int fd)
1170 {
1171         struct perf_evsel *evsel, *events = NULL;
1172         u64 *id;
1173         void *buf = NULL;
1174         u32 nre, sz, nr, i, j;
1175         ssize_t ret;
1176         size_t msz;
1177
1178         /* number of events */
1179         ret = readn(fd, &nre, sizeof(nre));
1180         if (ret != (ssize_t)sizeof(nre))
1181                 goto error;
1182
1183         if (ph->needs_swap)
1184                 nre = bswap_32(nre);
1185
1186         ret = readn(fd, &sz, sizeof(sz));
1187         if (ret != (ssize_t)sizeof(sz))
1188                 goto error;
1189
1190         if (ph->needs_swap)
1191                 sz = bswap_32(sz);
1192
1193         /* buffer to hold the on-file attr struct */
1194         buf = malloc(sz);
1195         if (!buf)
1196                 goto error;
1197
1198         /* the last event terminates with evsel->attr.size == 0: */
1199         events = calloc(nre + 1, sizeof(*events));
1200         if (!events)
1201                 goto error;
1202
1203         msz = sizeof(evsel->attr);
1204         if (sz < msz)
1205                 msz = sz;
1206
1207         for (i = 0, evsel = events; i < nre; evsel++, i++) {
1208                 evsel->idx = i;
1209
1210                 /*
1211                  * must read entire on-file attr struct to
1212                  * sync up with layout.
1213                  */
1214                 ret = readn(fd, buf, sz);
1215                 if (ret != (ssize_t)sz)
1216                         goto error;
1217
1218                 if (ph->needs_swap)
1219                         perf_event__attr_swap(buf);
1220
1221                 memcpy(&evsel->attr, buf, msz);
1222
1223                 ret = readn(fd, &nr, sizeof(nr));
1224                 if (ret != (ssize_t)sizeof(nr))
1225                         goto error;
1226
1227                 if (ph->needs_swap) {
1228                         nr = bswap_32(nr);
1229                         evsel->needs_swap = true;
1230                 }
1231
1232                 evsel->name = do_read_string(fd, ph);
1233
1234                 if (!nr)
1235                         continue;
1236
1237                 id = calloc(nr, sizeof(*id));
1238                 if (!id)
1239                         goto error;
1240                 evsel->ids = nr;
1241                 evsel->id = id;
1242
1243                 for (j = 0 ; j < nr; j++) {
1244                         ret = readn(fd, id, sizeof(*id));
1245                         if (ret != (ssize_t)sizeof(*id))
1246                                 goto error;
1247                         if (ph->needs_swap)
1248                                 *id = bswap_64(*id);
1249                         id++;
1250                 }
1251         }
1252 out:
1253         free(buf);
1254         return events;
1255 error:
1256         free_event_desc(events);
1257         events = NULL;
1258         goto out;
1259 }
1260
1261 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1262                                 void *priv __attribute__((unused)))
1263 {
1264         return fprintf(fp, ", %s = %s", name, val);
1265 }
1266
1267 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1268 {
1269         struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
1270         u32 j;
1271         u64 *id;
1272
1273         if (!events) {
1274                 fprintf(fp, "# event desc: not available or unable to read\n");
1275                 return;
1276         }
1277
1278         for (evsel = events; evsel->attr.size; evsel++) {
1279                 fprintf(fp, "# event : name = %s", evsel->name);
1280
1281                 if (evsel->ids) {
1282                         fprintf(fp, ", id = {");
1283                         for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1284                                 if (j)
1285                                         fputc(',', fp);
1286                                 fprintf(fp, " %"PRIu64, *id);
1287                         }
1288                         fprintf(fp, " }");
1289                 }
1290
1291                 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1292
1293                 fputc('\n', fp);
1294         }
1295
1296         free_event_desc(events);
1297 }
1298
1299 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1300                             FILE *fp)
1301 {
1302         fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1303 }
1304
1305 static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
1306                                 FILE *fp)
1307 {
1308         int i;
1309         struct numa_node *n;
1310
1311         for (i = 0; i < ph->env.nr_numa_nodes; i++) {
1312                 n = &ph->env.numa_nodes[i];
1313
1314                 fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
1315                             " free = %"PRIu64" kB\n",
1316                         n->node, n->mem_total, n->mem_free);
1317
1318                 fprintf(fp, "# node%u cpu list : ", n->node);
1319                 cpu_map__fprintf(n->map, fp);
1320         }
1321 }
1322
1323 static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1324 {
1325         fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
1326 }
1327
1328 static void print_branch_stack(struct perf_header *ph __maybe_unused,
1329                                int fd __maybe_unused, FILE *fp)
1330 {
1331         fprintf(fp, "# contains samples with branch stack\n");
1332 }
1333
1334 static void print_auxtrace(struct perf_header *ph __maybe_unused,
1335                            int fd __maybe_unused, FILE *fp)
1336 {
1337         fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1338 }
1339
1340 static void print_stat(struct perf_header *ph __maybe_unused,
1341                        int fd __maybe_unused, FILE *fp)
1342 {
1343         fprintf(fp, "# contains stat data\n");
1344 }
1345
1346 static void print_cache(struct perf_header *ph, int fd __maybe_unused,
1347                         FILE *fp)
1348 {
1349         int i;
1350
1351         fprintf(fp, "# CPU cache info:\n");
1352         for (i = 0; i < ph->env.caches_cnt; i++) {
1353                 fprintf(fp, "#  ");
1354                 cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
1355         }
1356 }
1357
1358 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1359                                FILE *fp)
1360 {
1361         const char *delimiter = "# pmu mappings: ";
1362         char *str, *tmp;
1363         u32 pmu_num;
1364         u32 type;
1365
1366         pmu_num = ph->env.nr_pmu_mappings;
1367         if (!pmu_num) {
1368                 fprintf(fp, "# pmu mappings: not available\n");
1369                 return;
1370         }
1371
1372         str = ph->env.pmu_mappings;
1373
1374         while (pmu_num) {
1375                 type = strtoul(str, &tmp, 0);
1376                 if (*tmp != ':')
1377                         goto error;
1378
1379                 str = tmp + 1;
1380                 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1381
1382                 delimiter = ", ";
1383                 str += strlen(str) + 1;
1384                 pmu_num--;
1385         }
1386
1387         fprintf(fp, "\n");
1388
1389         if (!pmu_num)
1390                 return;
1391 error:
1392         fprintf(fp, "# pmu mappings: unable to read\n");
1393 }
1394
1395 static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1396                              FILE *fp)
1397 {
1398         struct perf_session *session;
1399         struct perf_evsel *evsel;
1400         u32 nr = 0;
1401
1402         session = container_of(ph, struct perf_session, header);
1403
1404         evlist__for_each_entry(session->evlist, evsel) {
1405                 if (perf_evsel__is_group_leader(evsel) &&
1406                     evsel->nr_members > 1) {
1407                         fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1408                                 perf_evsel__name(evsel));
1409
1410                         nr = evsel->nr_members - 1;
1411                 } else if (nr) {
1412                         fprintf(fp, ",%s", perf_evsel__name(evsel));
1413
1414                         if (--nr == 0)
1415                                 fprintf(fp, "}\n");
1416                 }
1417         }
1418 }
1419
1420 static int __event_process_build_id(struct build_id_event *bev,
1421                                     char *filename,
1422                                     struct perf_session *session)
1423 {
1424         int err = -1;
1425         struct machine *machine;
1426         u16 cpumode;
1427         struct dso *dso;
1428         enum dso_kernel_type dso_type;
1429
1430         machine = perf_session__findnew_machine(session, bev->pid);
1431         if (!machine)
1432                 goto out;
1433
1434         cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1435
1436         switch (cpumode) {
1437         case PERF_RECORD_MISC_KERNEL:
1438                 dso_type = DSO_TYPE_KERNEL;
1439                 break;
1440         case PERF_RECORD_MISC_GUEST_KERNEL:
1441                 dso_type = DSO_TYPE_GUEST_KERNEL;
1442                 break;
1443         case PERF_RECORD_MISC_USER:
1444         case PERF_RECORD_MISC_GUEST_USER:
1445                 dso_type = DSO_TYPE_USER;
1446                 break;
1447         default:
1448                 goto out;
1449         }
1450
1451         dso = machine__findnew_dso(machine, filename);
1452         if (dso != NULL) {
1453                 char sbuild_id[SBUILD_ID_SIZE];
1454
1455                 dso__set_build_id(dso, &bev->build_id);
1456
1457                 if (dso_type != DSO_TYPE_USER) {
1458                         struct kmod_path m = { .name = NULL, };
1459
1460                         if (!kmod_path__parse_name(&m, filename) && m.kmod)
1461                                 dso__set_short_name(dso, strdup(m.name), true);
1462                         else
1463                                 dso->kernel = dso_type;
1464
1465                         free(m.name);
1466                 }
1467
1468                 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1469                                   sbuild_id);
1470                 pr_debug("build id event received for %s: %s\n",
1471                          dso->long_name, sbuild_id);
1472                 dso__put(dso);
1473         }
1474
1475         err = 0;
1476 out:
1477         return err;
1478 }
1479
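/*
 * Read a build-id table written in the old layout, i.e. before the pid
 * field was added to struct build_id_event, synthesizing the missing
 * pid from header.misc.
 */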
1480 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1481                                                  int input, u64 offset, u64 size)
1482 {
1483         struct perf_session *session = container_of(header, struct perf_session, header);
1484         struct {
1485                 struct perf_event_header   header;
1486                 u8                         build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1487                 char                       filename[0];
1488         } old_bev;
1489         struct build_id_event bev;
1490         char filename[PATH_MAX];
1491         u64 limit = offset + size;
1492
1493         while (offset < limit) {
1494                 ssize_t len;
1495
1496                 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1497                         return -1;
1498
1499                 if (header->needs_swap)
1500                         perf_event_header__bswap(&old_bev.header);
1501
1502                 len = old_bev.header.size - sizeof(old_bev);
1503                 if (readn(input, filename, len) != len)
1504                         return -1;
1505
1506                 bev.header = old_bev.header;
1507
1508                 /*
1509                  * As the pid is the missing value, we need to fill
1510                  * it properly. The header.misc value gives us a nice hint.
1511                  */
1512                 bev.pid = HOST_KERNEL_ID;
1513                 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1514                     bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1515                         bev.pid = DEFAULT_GUEST_KERNEL_ID;
1516
1517                 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1518                 __event_process_build_id(&bev, filename, session);
1519
1520                 offset += bev.header.size;
1521         }
1522
1523         return 0;
1524 }
1525
1526 static int perf_header__read_build_ids(struct perf_header *header,
1527                                        int input, u64 offset, u64 size)
1528 {
1529         struct perf_session *session = container_of(header, struct perf_session, header);
1530         struct build_id_event bev;
1531         char filename[PATH_MAX];
1532         u64 limit = offset + size, orig_offset = offset;
1533         int err = -1;
1534
1535         while (offset < limit) {
1536                 ssize_t len;
1537
1538                 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1539                         goto out;
1540
1541                 if (header->needs_swap)
1542                         perf_event_header__bswap(&bev.header);
1543
1544                 len = bev.header.size - sizeof(bev);
1545                 if (readn(input, filename, len) != len)
1546                         goto out;
1547                 /*
1548                  * The a1645ce1 changeset:
1549                  *
1550                  * "perf: 'perf kvm' tool for monitoring guest performance from host"
1551                  *
1552                  * Added a field to struct build_id_event that broke the file
1553                  * format.
1554                  *
1555                  * Since the kernel build-id is the first entry, process the
1556                  * table using the old format if the well known
1557                  * '[kernel.kallsyms]' string for the kernel build-id has the
1558                  * first 4 characters chopped off (where the pid_t sits).
1559                  */
1560                 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1561                         if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1562                                 return -1;
1563                         return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1564                 }
1565
1566                 __event_process_build_id(&bev, filename, session);
1567
1568                 offset += bev.header.size;
1569         }
1570         err = 0;
1571 out:
1572         return err;
1573 }
1574
1575 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1576                                 struct perf_header *ph __maybe_unused,
1577                                 int fd, void *data)
1578 {
1579         ssize_t ret = trace_report(fd, data, false);
1580         return ret < 0 ? -1 : 0;
1581 }
1582
1583 static int process_build_id(struct perf_file_section *section,
1584                             struct perf_header *ph, int fd,
1585                             void *data __maybe_unused)
1586 {
1587         if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1588                 pr_debug("Failed to read buildids, continuing...\n");
1589         return 0;
1590 }
1591
1592 static int process_hostname(struct perf_file_section *section __maybe_unused,
1593                             struct perf_header *ph, int fd,
1594                             void *data __maybe_unused)
1595 {
1596         ph->env.hostname = do_read_string(fd, ph);
1597         return ph->env.hostname ? 0 : -ENOMEM;
1598 }
1599
1600 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1601                              struct perf_header *ph, int fd,
1602                              void *data __maybe_unused)
1603 {
1604         ph->env.os_release = do_read_string(fd, ph);
1605         return ph->env.os_release ? 0 : -ENOMEM;
1606 }
1607
1608 static int process_version(struct perf_file_section *section __maybe_unused,
1609                            struct perf_header *ph, int fd,
1610                            void *data __maybe_unused)
1611 {
1612         ph->env.version = do_read_string(fd, ph);
1613         return ph->env.version ? 0 : -ENOMEM;
1614 }
1615
1616 static int process_arch(struct perf_file_section *section __maybe_unused,
1617                         struct perf_header *ph, int fd,
1618                         void *data __maybe_unused)
1619 {
1620         ph->env.arch = do_read_string(fd, ph);
1621         return ph->env.arch ? 0 : -ENOMEM;
1622 }
1623
1624 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1625                           struct perf_header *ph, int fd,
1626                           void *data __maybe_unused)
1627 {
1628         ssize_t ret;
1629         u32 nr;
1630
1631         ret = readn(fd, &nr, sizeof(nr));
1632         if (ret != sizeof(nr))
1633                 return -1;
1634
1635         if (ph->needs_swap)
1636                 nr = bswap_32(nr);
1637
1638         ph->env.nr_cpus_avail = nr;
1639
1640         ret = readn(fd, &nr, sizeof(nr));
1641         if (ret != sizeof(nr))
1642                 return -1;
1643
1644         if (ph->needs_swap)
1645                 nr = bswap_32(nr);
1646
1647         ph->env.nr_cpus_online = nr;
1648         return 0;
1649 }
1650
1651 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1652                            struct perf_header *ph, int fd,
1653                            void *data __maybe_unused)
1654 {
1655         ph->env.cpu_desc = do_read_string(fd, ph);
1656         return ph->env.cpu_desc ? 0 : -ENOMEM;
1657 }
1658
1659 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1660                          struct perf_header *ph,  int fd,
1661                          void *data __maybe_unused)
1662 {
1663         ph->env.cpuid = do_read_string(fd, ph);
1664         return ph->env.cpuid ? 0 : -ENOMEM;
1665 }
1666
1667 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1668                              struct perf_header *ph, int fd,
1669                              void *data __maybe_unused)
1670 {
1671         uint64_t mem;
1672         ssize_t ret;
1673
1674         ret = readn(fd, &mem, sizeof(mem));
1675         if (ret != sizeof(mem))
1676                 return -1;
1677
1678         if (ph->needs_swap)
1679                 mem = bswap_64(mem);
1680
1681         ph->env.total_mem = mem;
1682         return 0;
1683 }
1684
1685 static struct perf_evsel *
1686 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1687 {
1688         struct perf_evsel *evsel;
1689
1690         evlist__for_each_entry(evlist, evsel) {
1691                 if (evsel->idx == idx)
1692                         return evsel;
1693         }
1694
1695         return NULL;
1696 }
1697
1698 static void
1699 perf_evlist__set_event_name(struct perf_evlist *evlist,
1700                             struct perf_evsel *event)
1701 {
1702         struct perf_evsel *evsel;
1703
1704         if (!event->name)
1705                 return;
1706
1707         evsel = perf_evlist__find_by_index(evlist, event->idx);
1708         if (!evsel)
1709                 return;
1710
1711         if (evsel->name)
1712                 return;
1713
1714         evsel->name = strdup(event->name);
1715 }
1716
1717 static int
1718 process_event_desc(struct perf_file_section *section __maybe_unused,
1719                    struct perf_header *header, int fd,
1720                    void *data __maybe_unused)
1721 {
1722         struct perf_session *session;
1723         struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1724
1725         if (!events)
1726                 return 0;
1727
1728         session = container_of(header, struct perf_session, header);
1729         for (evsel = events; evsel->attr.size; evsel++)
1730                 perf_evlist__set_event_name(session->evlist, evsel);
1731
1732         free_event_desc(events);
1733
1734         return 0;
1735 }
1736
1737 static int process_cmdline(struct perf_file_section *section,
1738                            struct perf_header *ph, int fd,
1739                            void *data __maybe_unused)
1740 {
1741         ssize_t ret;
1742         char *str, *cmdline = NULL, **argv = NULL;
1743         u32 nr, i, len = 0;
1744
1745         ret = readn(fd, &nr, sizeof(nr));
1746         if (ret != sizeof(nr))
1747                 return -1;
1748
1749         if (ph->needs_swap)
1750                 nr = bswap_32(nr);
1751
1752         ph->env.nr_cmdline = nr;
1753
1754         cmdline = zalloc(section->size + nr + 1);
1755         if (!cmdline)
1756                 return -1;
1757
1758         argv = zalloc(sizeof(char *) * (nr + 1));
1759         if (!argv)
1760                 goto error;
1761
1762         for (i = 0; i < nr; i++) {
1763                 str = do_read_string(fd, ph);
1764                 if (!str)
1765                         goto error;
1766
1767                 argv[i] = cmdline + len;
1768                 memcpy(argv[i], str, strlen(str) + 1);
1769                 len += strlen(str) + 1;
1770                 free(str);
1771         }
1772         ph->env.cmdline = cmdline;
1773         ph->env.cmdline_argv = (const char **) argv;
1774         return 0;
1775
1776 error:
1777         free(argv);
1778         free(cmdline);
1779         return -1;
1780 }
1781
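/*
 * HEADER_CPU_TOPOLOGY payload, as parsed below: a u32 count of core-sibling
 * strings followed by the strings themselves, then a u32 count of
 * thread-sibling strings followed by those strings.  Newer files append one
 * (core_id, socket_id) u32 pair per online CPU; if the section is not large
 * enough to contain them, that part is skipped and env.cpu is released.
 */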
1782 static int process_cpu_topology(struct perf_file_section *section,
1783                                 struct perf_header *ph, int fd,
1784                                 void *data __maybe_unused)
1785 {
1786         ssize_t ret;
1787         u32 nr, i;
1788         char *str;
1789         struct strbuf sb;
1790         int cpu_nr = ph->env.nr_cpus_online;
1791         u64 size = 0;
1792
1793         ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1794         if (!ph->env.cpu)
1795                 return -1;
1796
1797         ret = readn(fd, &nr, sizeof(nr));
1798         if (ret != sizeof(nr))
1799                 goto free_cpu;
1800
1801         if (ph->needs_swap)
1802                 nr = bswap_32(nr);
1803
1804         ph->env.nr_sibling_cores = nr;
1805         size += sizeof(u32);
1806         if (strbuf_init(&sb, 128) < 0)
1807                 goto free_cpu;
1808
1809         for (i = 0; i < nr; i++) {
1810                 str = do_read_string(fd, ph);
1811                 if (!str)
1812                         goto error;
1813
1814                 /* include a NULL character at the end */
1815                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1816                         goto error;
1817                 size += string_size(str);
1818                 free(str);
1819         }
1820         ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1821
1822         ret = readn(fd, &nr, sizeof(nr));
1823         if (ret != sizeof(nr))
1824                 goto free_cpu;
1825
1826         if (ph->needs_swap)
1827                 nr = bswap_32(nr);
1828
1829         ph->env.nr_sibling_threads = nr;
1830         size += sizeof(u32);
1831
1832         for (i = 0; i < nr; i++) {
1833                 str = do_read_string(fd, ph);
1834                 if (!str)
1835                         goto error;
1836
1837                 /* include a NULL character at the end */
1838                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1839                         goto error;
1840                 size += string_size(str);
1841                 free(str);
1842         }
1843         ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1844
1845         /*
1846          * The header may be from old perf,
1847          * which doesn't include core id and socket id information.
1848          */
1849         if (section->size <= size) {
1850                 zfree(&ph->env.cpu);
1851                 return 0;
1852         }
1853
1854         for (i = 0; i < (u32)cpu_nr; i++) {
1855                 ret = readn(fd, &nr, sizeof(nr));
1856                 if (ret != sizeof(nr))
1857                         goto free_cpu;
1858
1859                 if (ph->needs_swap)
1860                         nr = bswap_32(nr);
1861
1862                 ph->env.cpu[i].core_id = nr;
1863
1864                 ret = readn(fd, &nr, sizeof(nr));
1865                 if (ret != sizeof(nr))
1866                         goto free_cpu;
1867
1868                 if (ph->needs_swap)
1869                         nr = bswap_32(nr);
1870
1871                 if (nr > (u32)cpu_nr) {
1872                         pr_debug("socket_id number is too big. "
1873                                  "You may need to upgrade the perf tool.\n");
1874                         goto free_cpu;
1875                 }
1876
1877                 ph->env.cpu[i].socket_id = nr;
1878         }
1879
1880         return 0;
1881
1882 error:
1883         strbuf_release(&sb);
1884 free_cpu:
1885         zfree(&ph->env.cpu);
1886         return -1;
1887 }
1888
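/*
 * HEADER_NUMA_TOPOLOGY payload, as parsed below: a u32 node count, then per
 * node a u32 node number, u64 total and free memory, and a CPU list string
 * that is turned into a cpu_map.
 */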
1889 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1890                                  struct perf_header *ph, int fd,
1891                                  void *data __maybe_unused)
1892 {
1893         struct numa_node *nodes, *n;
1894         ssize_t ret;
1895         u32 nr, i;
1896         char *str;
1897
1898         /* nr nodes */
1899         ret = readn(fd, &nr, sizeof(nr));
1900         if (ret != sizeof(nr))
1901                 return -1;
1902
1903         if (ph->needs_swap)
1904                 nr = bswap_32(nr);
1905
1906         nodes = zalloc(sizeof(*nodes) * nr);
1907         if (!nodes)
1908                 return -ENOMEM;
1909
1910         for (i = 0; i < nr; i++) {
1911                 n = &nodes[i];
1912
1913                 /* node number */
1914                 ret = readn(fd, &n->node, sizeof(u32));
1915                 if (ret != sizeof(n->node))
1916                         goto error;
1917
1918                 ret = readn(fd, &n->mem_total, sizeof(u64));
1919                 if (ret != sizeof(u64))
1920                         goto error;
1921
1922                 ret = readn(fd, &n->mem_free, sizeof(u64));
1923                 if (ret != sizeof(u64))
1924                         goto error;
1925
1926                 if (ph->needs_swap) {
1927                         n->node      = bswap_32(n->node);
1928                         n->mem_total = bswap_64(n->mem_total);
1929                         n->mem_free  = bswap_64(n->mem_free);
1930                 }
1931
1932                 str = do_read_string(fd, ph);
1933                 if (!str)
1934                         goto error;
1935
1936                 n->map = cpu_map__new(str);
1937                 if (!n->map)
1938                         goto error;
1939
1940                 free(str);
1941         }
1942         ph->env.nr_numa_nodes = nr;
1943         ph->env.numa_nodes = nodes;
1944         return 0;
1945
1946 error:
1947         free(nodes);
1948         return -1;
1949 }
1950
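/*
 * HEADER_PMU_MAPPINGS payload, as parsed below: a u32 number of PMUs, then
 * per PMU a u32 type and a name string.  The pairs are stored in
 * env.pmu_mappings as "type:name" entries separated by NUL bytes.
 */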
1951 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1952                                 struct perf_header *ph, int fd,
1953                                 void *data __maybe_unused)
1954 {
1955         ssize_t ret;
1956         char *name;
1957         u32 pmu_num;
1958         u32 type;
1959         struct strbuf sb;
1960
1961         ret = readn(fd, &pmu_num, sizeof(pmu_num));
1962         if (ret != sizeof(pmu_num))
1963                 return -1;
1964
1965         if (ph->needs_swap)
1966                 pmu_num = bswap_32(pmu_num);
1967
1968         if (!pmu_num) {
1969                 pr_debug("pmu mappings not available\n");
1970                 return 0;
1971         }
1972
1973         ph->env.nr_pmu_mappings = pmu_num;
1974         if (strbuf_init(&sb, 128) < 0)
1975                 return -1;
1976
1977         while (pmu_num) {
1978                 if (readn(fd, &type, sizeof(type)) != sizeof(type))
1979                         goto error;
1980                 if (ph->needs_swap)
1981                         type = bswap_32(type);
1982
1983                 name = do_read_string(fd, ph);
1984                 if (!name)
1985                         goto error;
1986
1987                 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
1988                         goto error;
1989                 /* include a NULL character at the end */
1990                 if (strbuf_add(&sb, "", 1) < 0)
1991                         goto error;
1992
1993                 if (!strcmp(name, "msr"))
1994                         ph->env.msr_pmu_type = type;
1995
1996                 free(name);
1997                 pmu_num--;
1998         }
1999         ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2000         return 0;
2001
2002 error:
2003         strbuf_release(&sb);
2004         return -1;
2005 }
2006
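/*
 * HEADER_GROUP_DESC payload, as parsed below: a u32 group count, then per
 * group a name string, a u32 leader index and a u32 member count.  The
 * descriptions are used to rebuild the leader/member links on the session's
 * evlist; "{anon_group}" is a placeholder name and is not copied.
 */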
2007 static int process_group_desc(struct perf_file_section *section __maybe_unused,
2008                               struct perf_header *ph, int fd,
2009                               void *data __maybe_unused)
2010 {
2011         int ret = -1;
2012         u32 i, nr, nr_groups;
2013         struct perf_session *session;
2014         struct perf_evsel *evsel, *leader = NULL;
2015         struct group_desc {
2016                 char *name;
2017                 u32 leader_idx;
2018                 u32 nr_members;
2019         } *desc;
2020
2021         if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2022                 return -1;
2023
2024         if (ph->needs_swap)
2025                 nr_groups = bswap_32(nr_groups);
2026
2027         ph->env.nr_groups = nr_groups;
2028         if (!nr_groups) {
2029                 pr_debug("group desc not available\n");
2030                 return 0;
2031         }
2032
2033         desc = calloc(nr_groups, sizeof(*desc));
2034         if (!desc)
2035                 return -1;
2036
2037         for (i = 0; i < nr_groups; i++) {
2038                 desc[i].name = do_read_string(fd, ph);
2039                 if (!desc[i].name)
2040                         goto out_free;
2041
2042                 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2043                         goto out_free;
2044
2045                 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2046                         goto out_free;
2047
2048                 if (ph->needs_swap) {
2049                         desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2050                         desc[i].nr_members = bswap_32(desc[i].nr_members);
2051                 }
2052         }
2053
2054         /*
2055          * Rebuild group relationship based on the group_desc
2056          */
2057         session = container_of(ph, struct perf_session, header);
2058         session->evlist->nr_groups = nr_groups;
2059
2060         i = nr = 0;
2061         evlist__for_each_entry(session->evlist, evsel) {
2062                 if (i < nr_groups && evsel->idx == (int) desc[i].leader_idx) {
2063                         evsel->leader = evsel;
2064                         /* {anon_group} is a dummy name */
2065                         if (strcmp(desc[i].name, "{anon_group}")) {
2066                                 evsel->group_name = desc[i].name;
2067                                 desc[i].name = NULL;
2068                         }
2069                         evsel->nr_members = desc[i].nr_members;
2070
2071                         if (nr > 0) {
2072                                 pr_debug("invalid group desc\n");
2073                                 goto out_free;
2074                         }
2075
2076                         leader = evsel;
2077                         nr = evsel->nr_members - 1;
2078                         i++;
2079                 } else if (nr) {
2080                         /* This is a group member */
2081                         evsel->leader = leader;
2082
2083                         nr--;
2084                 }
2085         }
2086
2087         if (i != nr_groups || nr != 0) {
2088                 pr_debug("invalid group desc\n");
2089                 goto out_free;
2090         }
2091
2092         ret = 0;
2093 out_free:
2094         for (i = 0; i < nr_groups; i++)
2095                 zfree(&desc[i].name);
2096         free(desc);
2097
2098         return ret;
2099 }
2100
2101 static int process_auxtrace(struct perf_file_section *section,
2102                             struct perf_header *ph, int fd,
2103                             void *data __maybe_unused)
2104 {
2105         struct perf_session *session;
2106         int err;
2107
2108         session = container_of(ph, struct perf_session, header);
2109
2110         err = auxtrace_index__process(fd, section->size, session,
2111                                       ph->needs_swap);
2112         if (err < 0)
2113                 pr_err("Failed to process auxtrace index\n");
2114         return err;
2115 }
2116
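/*
 * HEADER_CACHE payload, as parsed below: a u32 version (only version 1 is
 * accepted), a u32 count, then per cache level four u32s (level, line_size,
 * sets, ways) followed by three strings (type, size, map).
 */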
2117 static int process_cache(struct perf_file_section *section __maybe_unused,
2118                          struct perf_header *ph, int fd,
2119                          void *data __maybe_unused)
2120 {
2121         struct cpu_cache_level *caches;
2122         u32 cnt, i, version;
2123
2124         if (readn(fd, &version, sizeof(version)) != sizeof(version))
2125                 return -1;
2126
2127         if (ph->needs_swap)
2128                 version = bswap_32(version);
2129
2130         if (version != 1)
2131                 return -1;
2132
2133         if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2134                 return -1;
2135
2136         if (ph->needs_swap)
2137                 cnt = bswap_32(cnt);
2138
2139         caches = zalloc(sizeof(*caches) * cnt);
2140         if (!caches)
2141                 return -1;
2142
2143         for (i = 0; i < cnt; i++) {
2144                 struct cpu_cache_level c;
2145
2146                 #define _R(v)                                           \
2147                         if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2148                                 goto out_free_caches;                   \
2149                         if (ph->needs_swap)                             \
2150                                 c.v = bswap_32(c.v);                    \
2151
2152                 _R(level)
2153                 _R(line_size)
2154                 _R(sets)
2155                 _R(ways)
2156                 #undef _R
2157
2158                 #define _R(v)                           \
2159                         c.v = do_read_string(fd, ph);   \
2160                         if (!c.v)                       \
2161                                 goto out_free_caches;
2162
2163                 _R(type)
2164                 _R(size)
2165                 _R(map)
2166                 #undef _R
2167
2168                 caches[i] = c;
2169         }
2170
2171         ph->env.caches = caches;
2172         ph->env.caches_cnt = cnt;
2173         return 0;
2174 out_free_caches:
2175         free(caches);
2176         return -1;
2177 }
2178
2179 struct feature_ops {
2180         int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
2181         void (*print)(struct perf_header *h, int fd, FILE *fp);
2182         int (*process)(struct perf_file_section *section,
2183                        struct perf_header *h, int fd, void *data);
2184         const char *name;
2185         bool full_only;
2186 };
2187
2188 #define FEAT_OPA(n, func) \
2189         [n] = { .name = #n, .write = write_##func, .print = print_##func }
2190 #define FEAT_OPP(n, func) \
2191         [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2192                 .process = process_##func }
2193 #define FEAT_OPF(n, func) \
2194         [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2195                 .process = process_##func, .full_only = true }
2196
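/*
 * FEAT_OPA entries have no ->process callback (write/print only), FEAT_OPP
 * entries add one, and FEAT_OPF entries are additionally marked full_only,
 * i.e. they are only printed in the full listing (otherwise a
 * "use -I to display" note is emitted).
 */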
2197 /* feature_ops not implemented: */
2198 #define print_tracing_data      NULL
2199 #define print_build_id          NULL
2200
2201 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2202         FEAT_OPP(HEADER_TRACING_DATA,   tracing_data),
2203         FEAT_OPP(HEADER_BUILD_ID,       build_id),
2204         FEAT_OPP(HEADER_HOSTNAME,       hostname),
2205         FEAT_OPP(HEADER_OSRELEASE,      osrelease),
2206         FEAT_OPP(HEADER_VERSION,        version),
2207         FEAT_OPP(HEADER_ARCH,           arch),
2208         FEAT_OPP(HEADER_NRCPUS,         nrcpus),
2209         FEAT_OPP(HEADER_CPUDESC,        cpudesc),
2210         FEAT_OPP(HEADER_CPUID,          cpuid),
2211         FEAT_OPP(HEADER_TOTAL_MEM,      total_mem),
2212         FEAT_OPP(HEADER_EVENT_DESC,     event_desc),
2213         FEAT_OPP(HEADER_CMDLINE,        cmdline),
2214         FEAT_OPF(HEADER_CPU_TOPOLOGY,   cpu_topology),
2215         FEAT_OPF(HEADER_NUMA_TOPOLOGY,  numa_topology),
2216         FEAT_OPA(HEADER_BRANCH_STACK,   branch_stack),
2217         FEAT_OPP(HEADER_PMU_MAPPINGS,   pmu_mappings),
2218         FEAT_OPP(HEADER_GROUP_DESC,     group_desc),
2219         FEAT_OPP(HEADER_AUXTRACE,       auxtrace),
2220         FEAT_OPA(HEADER_STAT,           stat),
2221         FEAT_OPF(HEADER_CACHE,          cache),
2222 };
2223
2224 struct header_print_data {
2225         FILE *fp;
2226         bool full; /* extended list of headers */
2227 };
2228
2229 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2230                                            struct perf_header *ph,
2231                                            int feat, int fd, void *data)
2232 {
2233         struct header_print_data *hd = data;
2234
2235         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2236                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2237                                 "%d, continuing...\n", section->offset, feat);
2238                 return 0;
2239         }
2240         if (feat >= HEADER_LAST_FEATURE) {
2241                 pr_warning("unknown feature %d\n", feat);
2242                 return 0;
2243         }
2244         if (!feat_ops[feat].print)
2245                 return 0;
2246
2247         if (!feat_ops[feat].full_only || hd->full)
2248                 feat_ops[feat].print(ph, fd, hd->fp);
2249         else
2250                 fprintf(hd->fp, "# %s info available, use -I to display\n",
2251                         feat_ops[feat].name);
2252
2253         return 0;
2254 }
2255
2256 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2257 {
2258         struct header_print_data hd;
2259         struct perf_header *header = &session->header;
2260         int fd = perf_data_file__fd(session->file);
2261         hd.fp = fp;
2262         hd.full = full;
2263
2264         perf_header__process_sections(header, fd, &hd,
2265                                       perf_file_section__fprintf_info);
2266         return 0;
2267 }
2268
2269 static int do_write_feat(int fd, struct perf_header *h, int type,
2270                          struct perf_file_section **p,
2271                          struct perf_evlist *evlist)
2272 {
2273         int err;
2274         int ret = 0;
2275
2276         if (perf_header__has_feat(h, type)) {
2277                 if (!feat_ops[type].write)
2278                         return -1;
2279
2280                 (*p)->offset = lseek(fd, 0, SEEK_CUR);
2281
2282                 err = feat_ops[type].write(fd, h, evlist);
2283                 if (err < 0) {
2284                         pr_debug("failed to write feature %d\n", type);
2285
2286                         /* undo anything written */
2287                         lseek(fd, (*p)->offset, SEEK_SET);
2288
2289                         return -1;
2290                 }
2291                 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2292                 (*p)++;
2293         }
2294         return ret;
2295 }
2296
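/*
 * Write the optional feature sections: space for the perf_file_section table
 * is reserved at feat_offset, each feature payload is written after it via
 * do_write_feat(), and the table itself is written last, once the offsets
 * and sizes are known.  Features whose write handler fails are simply
 * cleared from the feature bitmap.
 */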
2297 static int perf_header__adds_write(struct perf_header *header,
2298                                    struct perf_evlist *evlist, int fd)
2299 {
2300         int nr_sections;
2301         struct perf_file_section *feat_sec, *p;
2302         int sec_size;
2303         u64 sec_start;
2304         int feat;
2305         int err;
2306
2307         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2308         if (!nr_sections)
2309                 return 0;
2310
2311         feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2312         if (feat_sec == NULL)
2313                 return -ENOMEM;
2314
2315         sec_size = sizeof(*feat_sec) * nr_sections;
2316
2317         sec_start = header->feat_offset;
2318         lseek(fd, sec_start + sec_size, SEEK_SET);
2319
2320         for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2321                 if (do_write_feat(fd, header, feat, &p, evlist))
2322                         perf_header__clear_feat(header, feat);
2323         }
2324
2325         lseek(fd, sec_start, SEEK_SET);
2326         /*
2327          * may write more than needed due to dropped feature, but
2328          * this is okay, the reader will skip the missing entries
2329          */
2330         err = do_write(fd, feat_sec, sec_size);
2331         if (err < 0)
2332                 pr_debug("failed to write feature section\n");
2333         free(feat_sec);
2334         return err;
2335 }
2336
2337 int perf_header__write_pipe(int fd)
2338 {
2339         struct perf_pipe_file_header f_header;
2340         int err;
2341
2342         f_header = (struct perf_pipe_file_header){
2343                 .magic     = PERF_MAGIC,
2344                 .size      = sizeof(f_header),
2345         };
2346
2347         err = do_write(fd, &f_header, sizeof(f_header));
2348         if (err < 0) {
2349                 pr_debug("failed to write perf pipe header\n");
2350                 return err;
2351         }
2352
2353         return 0;
2354 }
2355
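/*
 * On-disk layout produced below: the perf_file_header at offset 0, the
 * per-event id arrays, the perf_file_attr table, the sample data, and
 * finally (when at_exit is set) the feature sections appended after the
 * data.
 */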
2356 int perf_session__write_header(struct perf_session *session,
2357                                struct perf_evlist *evlist,
2358                                int fd, bool at_exit)
2359 {
2360         struct perf_file_header f_header;
2361         struct perf_file_attr   f_attr;
2362         struct perf_header *header = &session->header;
2363         struct perf_evsel *evsel;
2364         u64 attr_offset;
2365         int err;
2366
2367         lseek(fd, sizeof(f_header), SEEK_SET);
2368
2369         evlist__for_each_entry(session->evlist, evsel) {
2370                 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2371                 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
2372                 if (err < 0) {
2373                         pr_debug("failed to write perf header\n");
2374                         return err;
2375                 }
2376         }
2377
2378         attr_offset = lseek(fd, 0, SEEK_CUR);
2379
2380         evlist__for_each_entry(evlist, evsel) {
2381                 f_attr = (struct perf_file_attr){
2382                         .attr = evsel->attr,
2383                         .ids  = {
2384                                 .offset = evsel->id_offset,
2385                                 .size   = evsel->ids * sizeof(u64),
2386                         }
2387                 };
2388                 err = do_write(fd, &f_attr, sizeof(f_attr));
2389                 if (err < 0) {
2390                         pr_debug("failed to write perf header attribute\n");
2391                         return err;
2392                 }
2393         }
2394
2395         if (!header->data_offset)
2396                 header->data_offset = lseek(fd, 0, SEEK_CUR);
2397         header->feat_offset = header->data_offset + header->data_size;
2398
2399         if (at_exit) {
2400                 err = perf_header__adds_write(header, evlist, fd);
2401                 if (err < 0)
2402                         return err;
2403         }
2404
2405         f_header = (struct perf_file_header){
2406                 .magic     = PERF_MAGIC,
2407                 .size      = sizeof(f_header),
2408                 .attr_size = sizeof(f_attr),
2409                 .attrs = {
2410                         .offset = attr_offset,
2411                         .size   = evlist->nr_entries * sizeof(f_attr),
2412                 },
2413                 .data = {
2414                         .offset = header->data_offset,
2415                         .size   = header->data_size,
2416                 },
2417                 /* event_types is ignored, store zeros */
2418         };
2419
2420         memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2421
2422         lseek(fd, 0, SEEK_SET);
2423         err = do_write(fd, &f_header, sizeof(f_header));
2424         if (err < 0) {
2425                 pr_debug("failed to write perf header\n");
2426                 return err;
2427         }
2428         lseek(fd, header->data_offset + header->data_size, SEEK_SET);
2429
2430         return 0;
2431 }
2432
2433 static int perf_header__getbuffer64(struct perf_header *header,
2434                                     int fd, void *buf, size_t size)
2435 {
2436         if (readn(fd, buf, size) <= 0)
2437                 return -1;
2438
2439         if (header->needs_swap)
2440                 mem_bswap_64(buf, size);
2441
2442         return 0;
2443 }
2444
2445 int perf_header__process_sections(struct perf_header *header, int fd,
2446                                   void *data,
2447                                   int (*process)(struct perf_file_section *section,
2448                                                  struct perf_header *ph,
2449                                                  int feat, int fd, void *data))
2450 {
2451         struct perf_file_section *feat_sec, *sec;
2452         int nr_sections;
2453         int sec_size;
2454         int feat;
2455         int err;
2456
2457         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2458         if (!nr_sections)
2459                 return 0;
2460
2461         feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2462         if (!feat_sec)
2463                 return -1;
2464
2465         sec_size = sizeof(*feat_sec) * nr_sections;
2466
2467         lseek(fd, header->feat_offset, SEEK_SET);
2468
2469         err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2470         if (err < 0)
2471                 goto out_free;
2472
2473         for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2474                 err = process(sec++, header, feat, fd, data);
2475                 if (err < 0)
2476                         goto out_free;
2477         }
2478         err = 0;
2479 out_free:
2480         free(feat_sec);
2481         return err;
2482 }
2483
2484 static const int attr_file_abi_sizes[] = {
2485         [0] = PERF_ATTR_SIZE_VER0,
2486         [1] = PERF_ATTR_SIZE_VER1,
2487         [2] = PERF_ATTR_SIZE_VER2,
2488         [3] = PERF_ATTR_SIZE_VER3,
2489         [4] = PERF_ATTR_SIZE_VER4,
2490         0,
2491 };
2492
2493 /*
2494  * In the legacy file format, the magic number does not encode endianness;
2495  * hdr_sz was used for that instead. But given that hdr_sz can vary based
2496  * on ABI revisions, we need to try all known sizes in both endiannesses to
2497  * detect the endianness.
2498  */
2499 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2500 {
2501         uint64_t ref_size, attr_size;
2502         int i;
2503
2504         for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2505                 ref_size = attr_file_abi_sizes[i]
2506                          + sizeof(struct perf_file_section);
2507                 if (hdr_sz != ref_size) {
2508                         attr_size = bswap_64(hdr_sz);
2509                         if (attr_size != ref_size)
2510                                 continue;
2511
2512                         ph->needs_swap = true;
2513                 }
2514                 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2515                          i,
2516                          ph->needs_swap);
2517                 return 0;
2518         }
2519         /* could not determine endianness */
2520         return -1;
2521 }
2522
2523 #define PERF_PIPE_HDR_VER0      16
2524
2525 static const size_t attr_pipe_abi_sizes[] = {
2526         [0] = PERF_PIPE_HDR_VER0,
2527         0,
2528 };
2529
2530 /*
2531  * In the legacy pipe format, there is an implicit assumption that the endianness
2532  * of the host recording the samples and of the host parsing the samples is the
2533  * same. This is not always the case, given that the pipe output can be
2534  * redirected into a file and analyzed on a different machine with possibly a
2535  * different endianness and perf_event ABI revision in the perf tool itself.
2536  */
2537 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2538 {
2539         u64 attr_size;
2540         int i;
2541
2542         for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2543                 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2544                         attr_size = bswap_64(hdr_sz);
2545                         if (attr_size != hdr_sz)
2546                                 continue;
2547
2548                         ph->needs_swap = true;
2549                 }
2550                 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2551                 return 0;
2552         }
2553         return -1;
2554 }
2555
2556 bool is_perf_magic(u64 magic)
2557 {
2558         if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2559                 || magic == __perf_magic2
2560                 || magic == __perf_magic2_sw)
2561                 return true;
2562
2563         return false;
2564 }
2565
2566 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2567                               bool is_pipe, struct perf_header *ph)
2568 {
2569         int ret;
2570
2571         /* check for legacy format */
2572         ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2573         if (ret == 0) {
2574                 ph->version = PERF_HEADER_VERSION_1;
2575                 pr_debug("legacy perf.data format\n");
2576                 if (is_pipe)
2577                         return try_all_pipe_abis(hdr_sz, ph);
2578
2579                 return try_all_file_abis(hdr_sz, ph);
2580         }
2581         /*
2582          * the new magic number serves two purposes:
2583          * - unique number to identify actual perf.data files
2584          * - encode endianness of file
2585          */
2586         ph->version = PERF_HEADER_VERSION_2;
2587
2588         /* check magic number with one endianness */
2589         if (magic == __perf_magic2)
2590                 return 0;
2591
2592         /* check magic number with opposite endianness */
2593         if (magic != __perf_magic2_sw)
2594                 return -1;
2595
2596         ph->needs_swap = true;
2597
2598         return 0;
2599 }
2600
2601 int perf_file_header__read(struct perf_file_header *header,
2602                            struct perf_header *ph, int fd)
2603 {
2604         ssize_t ret;
2605
2606         lseek(fd, 0, SEEK_SET);
2607
2608         ret = readn(fd, header, sizeof(*header));
2609         if (ret <= 0)
2610                 return -1;
2611
2612         if (check_magic_endian(header->magic,
2613                                header->attr_size, false, ph) < 0) {
2614                 pr_debug("magic/endian check failed\n");
2615                 return -1;
2616         }
2617
2618         if (ph->needs_swap) {
2619                 mem_bswap_64(header, offsetof(struct perf_file_header,
2620                              adds_features));
2621         }
2622
2623         if (header->size != sizeof(*header)) {
2624                 /* Support the previous format */
2625                 if (header->size == offsetof(typeof(*header), adds_features))
2626                         bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2627                 else
2628                         return -1;
2629         } else if (ph->needs_swap) {
2630                 /*
2631                  * feature bitmap is declared as an array of unsigned longs --
2632                  * not good since its size can differ between the host that
2633                  * generated the data file and the host analyzing the file.
2634                  *
2635                  * We need to handle endianness, but we don't know the size of
2636                  * the unsigned long where the file was generated. Take a best
2637          * guess at determining it: try a 64-bit swap first (i.e., file
2638                  * created on a 64-bit host), and check if the hostname feature
2639                  * bit is set (this feature bit is forced on as of fbe96f2).
2640                  * If the bit is not, undo the 64-bit swap and try a 32-bit
2641                  * swap. If the hostname bit is still not set (e.g., older data
2642          * file), punt and fall back to the original behavior --
2643                  * clearing all feature bits and setting buildid.
2644                  */
2645                 mem_bswap_64(&header->adds_features,
2646                             BITS_TO_U64(HEADER_FEAT_BITS));
2647
2648                 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2649                         /* unswap as u64 */
2650                         mem_bswap_64(&header->adds_features,
2651                                     BITS_TO_U64(HEADER_FEAT_BITS));
2652
2653                         /* unswap as u32 */
2654                         mem_bswap_32(&header->adds_features,
2655                                     BITS_TO_U32(HEADER_FEAT_BITS));
2656                 }
2657
2658                 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2659                         bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2660                         set_bit(HEADER_BUILD_ID, header->adds_features);
2661                 }
2662         }
2663
2664         memcpy(&ph->adds_features, &header->adds_features,
2665                sizeof(ph->adds_features));
2666
2667         ph->data_offset  = header->data.offset;
2668         ph->data_size    = header->data.size;
2669         ph->feat_offset  = header->data.offset + header->data.size;
2670         return 0;
2671 }
2672
2673 static int perf_file_section__process(struct perf_file_section *section,
2674                                       struct perf_header *ph,
2675                                       int feat, int fd, void *data)
2676 {
2677         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2678                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2679                           "%d, continuing...\n", section->offset, feat);
2680                 return 0;
2681         }
2682
2683         if (feat >= HEADER_LAST_FEATURE) {
2684                 pr_debug("unknown feature %d, continuing...\n", feat);
2685                 return 0;
2686         }
2687
2688         if (!feat_ops[feat].process)
2689                 return 0;
2690
2691         return feat_ops[feat].process(section, ph, fd, data);
2692 }
2693
2694 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2695                                        struct perf_header *ph, int fd,
2696                                        bool repipe)
2697 {
2698         ssize_t ret;
2699
2700         ret = readn(fd, header, sizeof(*header));
2701         if (ret <= 0)
2702                 return -1;
2703
2704         if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2705                 pr_debug("endian/magic failed\n");
2706                 return -1;
2707         }
2708
2709         if (ph->needs_swap)
2710                 header->size = bswap_64(header->size);
2711
2712         if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2713                 return -1;
2714
2715         return 0;
2716 }
2717
2718 static int perf_header__read_pipe(struct perf_session *session)
2719 {
2720         struct perf_header *header = &session->header;
2721         struct perf_pipe_file_header f_header;
2722
2723         if (perf_file_header__read_pipe(&f_header, header,
2724                                         perf_data_file__fd(session->file),
2725                                         session->repipe) < 0) {
2726                 pr_debug("incompatible file format\n");
2727                 return -EINVAL;
2728         }
2729
2730         return 0;
2731 }
2732
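/*
 * Read one on-file attribute: the first PERF_ATTR_SIZE_VER0 bytes are always
 * present, the remainder depends on the attr->size recorded in the file
 * (which must not exceed the perf_event_attr this tool was built with), and
 * the trailing perf_file_section locates the event ids, which the caller
 * reads.
 */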
2733 static int read_attr(int fd, struct perf_header *ph,
2734                      struct perf_file_attr *f_attr)
2735 {
2736         struct perf_event_attr *attr = &f_attr->attr;
2737         size_t sz, left;
2738         size_t our_sz = sizeof(f_attr->attr);
2739         ssize_t ret;
2740
2741         memset(f_attr, 0, sizeof(*f_attr));
2742
2743         /* read minimal guaranteed structure */
2744         ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2745         if (ret <= 0) {
2746                 pr_debug("cannot read %d bytes of header attr\n",
2747                          PERF_ATTR_SIZE_VER0);
2748                 return -1;
2749         }
2750
2751         /* on file perf_event_attr size */
2752         sz = attr->size;
2753
2754         if (ph->needs_swap)
2755                 sz = bswap_32(sz);
2756
2757         if (sz == 0) {
2758                 /* assume ABI0 */
2759                 sz = PERF_ATTR_SIZE_VER0;
2760         } else if (sz > our_sz) {
2761                 pr_debug("file uses a more recent and unsupported ABI"
2762                          " (%zu bytes extra)\n", sz - our_sz);
2763                 return -1;
2764         }
2765         /* what we have not yet read and that we know about */
2766         left = sz - PERF_ATTR_SIZE_VER0;
2767         if (left) {
2768                 void *ptr = attr;
2769                 ptr += PERF_ATTR_SIZE_VER0;
2770
2771                 ret = readn(fd, ptr, left);
2772         }
2773         /* read perf_file_section, ids are read in caller */
2774         ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2775
2776         return ret <= 0 ? -1 : 0;
2777 }
2778
2779 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2780                                                 struct pevent *pevent)
2781 {
2782         struct event_format *event;
2783         char bf[128];
2784
2785         /* already prepared */
2786         if (evsel->tp_format)
2787                 return 0;
2788
2789         if (pevent == NULL) {
2790                 pr_debug("broken or missing trace data\n");
2791                 return -1;
2792         }
2793
2794         event = pevent_find_event(pevent, evsel->attr.config);
2795         if (event == NULL)
2796                 return -1;
2797
2798         if (!evsel->name) {
2799                 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2800                 evsel->name = strdup(bf);
2801                 if (evsel->name == NULL)
2802                         return -1;
2803         }
2804
2805         evsel->tp_format = event;
2806         return 0;
2807 }
2808
2809 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2810                                                   struct pevent *pevent)
2811 {
2812         struct perf_evsel *pos;
2813
2814         evlist__for_each_entry(evlist, pos) {
2815                 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2816                     perf_evsel__prepare_tracepoint_event(pos, pevent))
2817                         return -1;
2818         }
2819
2820         return 0;
2821 }
2822
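/*
 * Read a perf.data header: detect magic/endianness, create the evlist from
 * the on-file attributes and their ids, then walk the feature sections and
 * prepare any tracepoint events from the embedded tracing data.  Pipe input
 * is handled separately by perf_header__read_pipe().
 */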
2823 int perf_session__read_header(struct perf_session *session)
2824 {
2825         struct perf_data_file *file = session->file;
2826         struct perf_header *header = &session->header;
2827         struct perf_file_header f_header;
2828         struct perf_file_attr   f_attr;
2829         u64                     f_id;
2830         int nr_attrs, nr_ids, i, j;
2831         int fd = perf_data_file__fd(file);
2832
2833         session->evlist = perf_evlist__new();
2834         if (session->evlist == NULL)
2835                 return -ENOMEM;
2836
2837         session->evlist->env = &header->env;
2838         session->machines.host.env = &header->env;
2839         if (perf_data_file__is_pipe(file))
2840                 return perf_header__read_pipe(session);
2841
2842         if (perf_file_header__read(&f_header, header, fd) < 0)
2843                 return -EINVAL;
2844
2845         /*
2846          * Sanity check that perf.data was written cleanly; data size is
2847          * initialized to 0 and updated only if the on_exit function is run.
2848          * If data size is still 0 then the file contains only partial
2849          * information.  Just warn the user and process as much of it as we can.
2850          */
2851         if (f_header.data.size == 0) {
2852                 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2853                            "Was the 'perf record' command properly terminated?\n",
2854                            file->path);
2855         }
2856
2857         if (f_header.attr_size == 0) {
2858                 pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
2859                        "Was the 'perf record' command properly terminated?\n",
2860                        file->path);
2861                 return -EINVAL;
2862         }
2863
2864         nr_attrs = f_header.attrs.size / f_header.attr_size;
2865         lseek(fd, f_header.attrs.offset, SEEK_SET);
2866
2867         for (i = 0; i < nr_attrs; i++) {
2868                 struct perf_evsel *evsel;
2869                 off_t tmp;
2870
2871                 if (read_attr(fd, header, &f_attr) < 0)
2872                         goto out_errno;
2873
2874                 if (header->needs_swap) {
2875                         f_attr.ids.size   = bswap_64(f_attr.ids.size);
2876                         f_attr.ids.offset = bswap_64(f_attr.ids.offset);
2877                         perf_event__attr_swap(&f_attr.attr);
2878                 }
2879
2880                 tmp = lseek(fd, 0, SEEK_CUR);
2881                 evsel = perf_evsel__new(&f_attr.attr);
2882
2883                 if (evsel == NULL)
2884                         goto out_delete_evlist;
2885
2886                 evsel->needs_swap = header->needs_swap;
2887                 /*
2888                  * Do it before so that if perf_evsel__alloc_id fails, this
2889                  * entry gets purged too at perf_evlist__delete().
2890                  */
2891                 perf_evlist__add(session->evlist, evsel);
2892
2893                 nr_ids = f_attr.ids.size / sizeof(u64);
2894                 /*
2895                  * We don't have the cpu and thread maps on the header, so
2896                  * for allocating the perf_sample_id table we fake 1 cpu and
2897                  * nr_ids threads.
2898                  */
2899                 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2900                         goto out_delete_evlist;
2901
2902                 lseek(fd, f_attr.ids.offset, SEEK_SET);
2903
2904                 for (j = 0; j < nr_ids; j++) {
2905                         if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
2906                                 goto out_errno;
2907
2908                         perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
2909                 }
2910
2911                 lseek(fd, tmp, SEEK_SET);
2912         }
2913
2914         symbol_conf.nr_events = nr_attrs;
2915
2916         perf_header__process_sections(header, fd, &session->tevent,
2917                                       perf_file_section__process);
2918
2919         if (perf_evlist__prepare_tracepoint_events(session->evlist,
2920                                                    session->tevent.pevent))
2921                 goto out_delete_evlist;
2922
2923         return 0;
2924 out_errno:
2925         return -errno;
2926
2927 out_delete_evlist:
2928         perf_evlist__delete(session->evlist);
2929         session->evlist = NULL;
2930         return -ENOMEM;
2931 }
2932
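/*
 * Synthesize a PERF_RECORD_HEADER_ATTR event: the perf_event_attr (padded to
 * a u64 boundary) followed by the id array.  The total size must fit in the
 * u16 header.size field, hence the -E2BIG check below.
 */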
2933 int perf_event__synthesize_attr(struct perf_tool *tool,
2934                                 struct perf_event_attr *attr, u32 ids, u64 *id,
2935                                 perf_event__handler_t process)
2936 {
2937         union perf_event *ev;
2938         size_t size;
2939         int err;
2940
2941         size = sizeof(struct perf_event_attr);
2942         size = PERF_ALIGN(size, sizeof(u64));
2943         size += sizeof(struct perf_event_header);
2944         size += ids * sizeof(u64);
2945
2946         ev = zalloc(size);
2947
2948         if (ev == NULL)
2949                 return -ENOMEM;
2950
2951         ev->attr.attr = *attr;
2952         memcpy(ev->attr.id, id, ids * sizeof(u64));
2953
2954         ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2955         ev->attr.header.size = (u16)size;
2956
2957         if (ev->attr.header.size == size)
2958                 err = process(tool, ev, NULL, NULL);
2959         else
2960                 err = -E2BIG;
2961
2962         free(ev);
2963
2964         return err;
2965 }
2966
2967 static struct event_update_event *
2968 event_update_event__new(size_t size, u64 type, u64 id)
2969 {
2970         struct event_update_event *ev;
2971
2972         size += sizeof(*ev);
2973         size  = PERF_ALIGN(size, sizeof(u64));
2974
2975         ev = zalloc(size);
2976         if (ev) {
2977                 ev->header.type = PERF_RECORD_EVENT_UPDATE;
2978                 ev->header.size = (u16)size;
2979                 ev->type = type;
2980                 ev->id = id;
2981         }
2982         return ev;
2983 }
2984
2985 int
2986 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
2987                                          struct perf_evsel *evsel,
2988                                          perf_event__handler_t process)
2989 {
2990         struct event_update_event *ev;
2991         size_t size = strlen(evsel->unit);
2992         int err;
2993
2994         ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
2995         if (ev == NULL)
2996                 return -ENOMEM;
2997
2998         strlcpy(ev->data, evsel->unit, size + 1);
2999         err = process(tool, (union perf_event *)ev, NULL, NULL);
3000         free(ev);
3001         return err;
3002 }
3003
3004 int
3005 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3006                                           struct perf_evsel *evsel,
3007                                           perf_event__handler_t process)
3008 {
3009         struct event_update_event *ev;
3010         struct event_update_event_scale *ev_data;
3011         int err;
3012
3013         ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3014         if (ev == NULL)
3015                 return -ENOMEM;
3016
3017         ev_data = (struct event_update_event_scale *) ev->data;
3018         ev_data->scale = evsel->scale;
3019         err = process(tool, (union perf_event*) ev, NULL, NULL);
3020         free(ev);
3021         return err;
3022 }
3023
3024 int
3025 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3026                                          struct perf_evsel *evsel,
3027                                          perf_event__handler_t process)
3028 {
3029         struct event_update_event *ev;
3030         size_t len = strlen(evsel->name);
3031         int err;
3032
3033         ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3034         if (ev == NULL)
3035                 return -ENOMEM;
3036
3037         strlcpy(ev->data, evsel->name, len + 1);
3038         err = process(tool, (union perf_event*) ev, NULL, NULL);
3039         free(ev);
3040         return err;
3041 }
3042
3043 int
3044 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3045                                         struct perf_evsel *evsel,
3046                                         perf_event__handler_t process)
3047 {
3048         size_t size = sizeof(struct event_update_event);
3049         struct event_update_event *ev;
3050         int max, err;
3051         u16 type;
3052
3053         if (!evsel->own_cpus)
3054                 return 0;
3055
3056         ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3057         if (!ev)
3058                 return -ENOMEM;
3059
3060         ev->header.type = PERF_RECORD_EVENT_UPDATE;
3061         ev->header.size = (u16)size;
3062         ev->type = PERF_EVENT_UPDATE__CPUS;
3063         ev->id   = evsel->id[0];
3064
3065         cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3066                                  evsel->own_cpus,
3067                                  type, max);
3068
3069         err = process(tool, (union perf_event*) ev, NULL, NULL);
3070         free(ev);
3071         return err;
3072 }
3073
3074 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3075 {
3076         struct event_update_event *ev = &event->event_update;
3077         struct event_update_event_scale *ev_scale;
3078         struct event_update_event_cpus *ev_cpus;
3079         struct cpu_map *map;
3080         size_t ret;
3081
3082         ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);
3083
3084         switch (ev->type) {
3085         case PERF_EVENT_UPDATE__SCALE:
3086                 ev_scale = (struct event_update_event_scale *) ev->data;
3087                 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3088                 break;
3089         case PERF_EVENT_UPDATE__UNIT:
3090                 ret += fprintf(fp, "... unit:  %s\n", ev->data);
3091                 break;
3092         case PERF_EVENT_UPDATE__NAME:
3093                 ret += fprintf(fp, "... name:  %s\n", ev->data);
3094                 break;
3095         case PERF_EVENT_UPDATE__CPUS:
3096                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3097                 ret += fprintf(fp, "... ");
3098
3099                 map = cpu_map__new_data(&ev_cpus->cpus);
3100                 if (map)
3101                         ret += cpu_map__fprintf(map, fp);
3102                 else
3103                         ret += fprintf(fp, "failed to get cpus\n");
3104                 break;
3105         default:
3106                 ret += fprintf(fp, "... unknown type\n");
3107                 break;
3108         }
3109
3110         return ret;
3111 }
3112
3113 int perf_event__synthesize_attrs(struct perf_tool *tool,
3114                                    struct perf_session *session,
3115                                    perf_event__handler_t process)
3116 {
3117         struct perf_evsel *evsel;
3118         int err = 0;
3119
3120         evlist__for_each_entry(session->evlist, evsel) {
3121                 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3122                                                   evsel->id, process);
3123                 if (err) {
3124                         pr_debug("failed to create perf header attribute\n");
3125                         return err;
3126                 }
3127         }
3128
3129         return err;
3130 }
3131
3132 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3133                              union perf_event *event,
3134                              struct perf_evlist **pevlist)
3135 {
3136         u32 i, ids, n_ids;
3137         struct perf_evsel *evsel;
3138         struct perf_evlist *evlist = *pevlist;
3139
3140         if (evlist == NULL) {
3141                 *pevlist = evlist = perf_evlist__new();
3142                 if (evlist == NULL)
3143                         return -ENOMEM;
3144         }
3145
3146         evsel = perf_evsel__new(&event->attr.attr);
3147         if (evsel == NULL)
3148                 return -ENOMEM;
3149
3150         perf_evlist__add(evlist, evsel);
3151
3152         ids = event->header.size;
3153         ids -= (void *)&event->attr.id - (void *)event;
3154         n_ids = ids / sizeof(u64);
3155         /*
3156          * We don't have the cpu and thread maps on the header, so
3157          * for allocating the perf_sample_id table we fake 1 cpu and
3158          * n_ids threads.
3159          */
3160         if (perf_evsel__alloc_id(evsel, 1, n_ids))
3161                 return -ENOMEM;
3162
3163         for (i = 0; i < n_ids; i++) {
3164                 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
3165         }
3166
3167         symbol_conf.nr_events = evlist->nr_entries;
3168
3169         return 0;
3170 }
3171
3172 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3173                                      union perf_event *event,
3174                                      struct perf_evlist **pevlist)
3175 {
3176         struct event_update_event *ev = &event->event_update;
3177         struct event_update_event_scale *ev_scale;
3178         struct event_update_event_cpus *ev_cpus;
3179         struct perf_evlist *evlist;
3180         struct perf_evsel *evsel;
3181         struct cpu_map *map;
3182
3183         if (!pevlist || *pevlist == NULL)
3184                 return -EINVAL;
3185
3186         evlist = *pevlist;
3187
3188         evsel = perf_evlist__id2evsel(evlist, ev->id);
3189         if (evsel == NULL)
3190                 return -EINVAL;
3191
3192         switch (ev->type) {
3193         case PERF_EVENT_UPDATE__UNIT:
3194                 evsel->unit = strdup(ev->data);
3195                 break;
3196         case PERF_EVENT_UPDATE__NAME:
3197                 evsel->name = strdup(ev->data);
3198                 break;
3199         case PERF_EVENT_UPDATE__SCALE:
3200                 ev_scale = (struct event_update_event_scale *) ev->data;
3201                 evsel->scale = ev_scale->scale;
3202                 break;
3203         case PERF_EVENT_UPDATE__CPUS:
3204                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3205
3206                 map = cpu_map__new_data(&ev_cpus->cpus);
3207                 if (map)
3208                         evsel->own_cpus = map;
3209                 else
3210                         pr_err("failed to get event_update cpus\n");
3211         default:
3212                 break;
3213         }
3214
3215         return 0;
3216 }
3217
3218 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
3219                                         struct perf_evlist *evlist,
3220                                         perf_event__handler_t process)
3221 {
3222         union perf_event ev;
3223         struct tracing_data *tdata;
3224         ssize_t size = 0, aligned_size = 0, padding;
3225         int err __maybe_unused = 0;
3226
3227         /*
3228          * We are going to store the size of the data followed
3229          * by the data contents. Since the fd is a pipe,
3230          * we cannot seek back to store the size of the data once
3231          * we know it. Instead we:
3232          *
3233          * - write the tracing data to the temp file
3234          * - get/write the data size to pipe
3235          * - write the tracing data from the temp file
3236          *   to the pipe
3237          */
3238         tdata = tracing_data_get(&evlist->entries, fd, true);
3239         if (!tdata)
3240                 return -1;
3241
3242         memset(&ev, 0, sizeof(ev));
3243
3244         ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
3245         size = tdata->size;
3246         aligned_size = PERF_ALIGN(size, sizeof(u64));
3247         padding = aligned_size - size;
3248         ev.tracing_data.header.size = sizeof(ev.tracing_data);
3249         ev.tracing_data.size = aligned_size;
3250
3251         process(tool, &ev, NULL, NULL);
3252
3253         /*
3254          * The put function will copy all the tracing data
3255          * stored in the temp file to the pipe.
3256          */
3257         tracing_data_put(tdata);
3258
3259         write_padded(fd, NULL, 0, padding);
3260
3261         return aligned_size;
3262 }
3263
3264 int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
3265                                      union perf_event *event,
3266                                      struct perf_session *session)
3267 {
3268         ssize_t size_read, padding, size = event->tracing_data.size;
3269         int fd = perf_data_file__fd(session->file);
3270         off_t offset = lseek(fd, 0, SEEK_CUR);
3271         char buf[BUFSIZ];
3272
3273         /* setup for reading amidst mmap */
3274         lseek(fd, offset + sizeof(struct tracing_data_event),
3275               SEEK_SET);
3276
3277         size_read = trace_report(fd, &session->tevent,
3278                                  session->repipe);
3279         padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
3280
3281         if (readn(fd, buf, padding) < 0) {
3282                 pr_err("%s: reading input file", __func__);
3283                 return -1;
3284         }
3285         if (session->repipe) {
3286                 int retw = write(STDOUT_FILENO, buf, padding);
3287                 if (retw <= 0 || retw != padding) {
3288                         pr_err("%s: repiping tracing data padding", __func__);
3289                         return -1;
3290                 }
3291         }
3292
3293         if (size_read + padding != size) {
3294                 pr_err("%s: tracing data size mismatch", __func__);
3295                 return -1;
3296         }
3297
3298         perf_evlist__prepare_tracepoint_events(session->evlist,
3299                                                session->tevent.pevent);
3300
3301         return size_read + padding;
3302 }
3303
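/*
 * Synthesize a PERF_RECORD_HEADER_BUILD_ID event for a DSO that was hit:
 * the build id bytes plus the DSO long name, with the name length padded to
 * NAME_ALIGN in the header size.
 */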
3304 int perf_event__synthesize_build_id(struct perf_tool *tool,
3305                                     struct dso *pos, u16 misc,
3306                                     perf_event__handler_t process,
3307                                     struct machine *machine)
3308 {
3309         union perf_event ev;
3310         size_t len;
3311         int err = 0;
3312
3313         if (!pos->hit)
3314                 return err;
3315
3316         memset(&ev, 0, sizeof(ev));
3317
3318         len = pos->long_name_len + 1;
3319         len = PERF_ALIGN(len, NAME_ALIGN);
3320         memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3321         ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3322         ev.build_id.header.misc = misc;
3323         ev.build_id.pid = machine->pid;
3324         ev.build_id.header.size = sizeof(ev.build_id) + len;
3325         memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3326
3327         err = process(tool, &ev, NULL, machine);
3328
3329         return err;
3330 }
3331
3332 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
3333                                  union perf_event *event,
3334                                  struct perf_session *session)
3335 {
3336         __event_process_build_id(&event->build_id,
3337                                  event->build_id.filename,
3338                                  session);
3339         return 0;
3340 }