/*
 * Meta performance counter support.
 *  Copyright (C) 2012 Imagination Technologies Ltd
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
10 #ifndef METAG_PERF_EVENT_H_
11 #define METAG_PERF_EVENT_H_
13 #include <linux/kernel.h>
14 #include <linux/interrupt.h>
15 #include <linux/perf_event.h>
17 /* For performance counter definitions */
18 #include <asm/metag_mem.h>
/*
 * The Meta core has two performance counters, with 24-bit resolution. Newer
 * cores generate an overflow interrupt on transition from 0xffffff to 0.
 *
 * Each counter consists of the counter id, hardware thread id, and the count
 * itself; each counter can be assigned to multiple hardware threads at any
 * one time, with the returned count being an aggregate of events. A small
 * number of events are thread global, i.e. they count the aggregate of all
 * threads' events, regardless of the thread selected.
 *
 * Newer cores can store an arbitrary 24-bit number in the counter, whereas
 * older cores will clear the counter bits on write.
 *
 * We also have a pseudo-counter in the form of the thread active cycles
 * counter (which, incidentally, is also bound to the last counter slot,
 * METAG_INST_COUNTER).
 */
/* Two hardware counters plus the thread-active-cycles pseudo-counter. */
#define MAX_HWEVENTS		3
/* Counters are 24 bits wide; the largest value a counter can hold. */
#define MAX_PERIOD		0xffffffUL
/* The pseudo-counter always occupies the last event slot. */
#define METAG_INST_COUNTER	(MAX_HWEVENTS - 1)
/**
 * struct cpu_hw_events - a processor core's performance events
 * @events: an array of perf_events active for a given index.
 * @used_mask: a bitmap of in-use counters.
 * @pmu_lock: a perf counter lock
 *
 * This is a per-cpu/core structure that maintains a record of its
 * performance counters' state.
 */
50 struct cpu_hw_events {
51 struct perf_event *events[MAX_HWEVENTS];
52 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
53 raw_spinlock_t pmu_lock;
/**
 * struct metag_pmu - the Meta PMU structure
 * @pmu: core pmu structure
 * @version: core version
 * @handle_irq: overflow interrupt handler
 * @enable: enable a counter
 * @disable: disable a counter
 * @read: read the value of a counter
 * @write: write a value to a counter
 * @event_map: kernel event to counter event id map
 * @cache_events: kernel cache counter to core cache counter map
 * @max_period: maximum value of the counter before overflow
 * @max_events: maximum number of counters available at any one time
 * @active_events: number of active counters
 * @reserve_mutex: counter reservation mutex
 *
 * This describes the main functionality and data used by the performance
 * counter implementation.
 */
80 irqreturn_t (*handle_irq)(int irq_num, void *dev);
81 void (*enable)(struct hw_perf_event *evt, int idx);
82 void (*disable)(struct hw_perf_event *evt, int idx);
84 void (*write)(int idx, u32 val);
85 int (*event_map)(int idx);
86 const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
87 [PERF_COUNT_HW_CACHE_OP_MAX]
88 [PERF_COUNT_HW_CACHE_RESULT_MAX];
91 atomic_t active_events;
92 struct mutex reserve_mutex;
/*
 * Convenience accessors for the performance counter register banks: each
 * register within a bank is sizeof(u64) bytes after the bank's base.
 */
#define PERF_COUNT(cnt)	(PERF_COUNT0 + (sizeof(u64) * (cnt)))
#define PERF_ICORE(cnt)	(PERF_ICORE0 + (sizeof(u64) * (cnt)))
#define PERF_CHAN(cnt)	(PERF_CHAN0 + (sizeof(u64) * (cnt)))
/* Cache index macros */
/* Token-pastes to a generic perf cache id, e.g. C(L1D) -> PERF_COUNT_HW_CACHE_L1D. */
#define C(x) PERF_COUNT_HW_CACHE_##x
/*
 * Sentinel values for the cache_events table. NOTE(review): by the usual
 * perf arch convention, UNSUPPORTED presumably marks op/result combinations
 * the hardware cannot count and NONSENSE marks combinations that are
 * meaningless — confirm against the driver's cache_events table.
 */
#define CACHE_OP_UNSUPPORTED 0xfffe
#define CACHE_OP_NONSENSE 0xffff