2 * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
4 * Licensed under the terms of the GNU GPL License version 2.
6 * Based on Len Brown's <lenb@kernel.org> turbostat tool.
9 #if defined(__i386__) || defined(__x86_64__)
16 #include "helpers/helpers.h"
17 #include "idle_monitor/cpupower-monitor.h"
/*
 * Residency-counter MSR addresses for Nehalem-class Intel CPUs.
 * The percent computation below divides these counters by a TSC delta,
 * so they presumably tick at TSC rate — see Intel SDM vol. 4 to confirm.
 */
19 #define MSR_PKG_C3_RESIDENCY 0x3F8
20 #define MSR_PKG_C6_RESIDENCY 0x3F9
21 #define MSR_CORE_C3_RESIDENCY 0x3FC
22 #define MSR_CORE_C6_RESIDENCY 0x3FD
/* Number of monitored C-states: C3, C6, PC3, PC6 (TSC is not counted). */
26 #define NHM_CSTATE_COUNT 4
/*
 * Counter ids. The first four index nhm_cstates[] and the count arrays;
 * TSC = 0xFFFF is an out-of-band id used only to request a TSC read.
 */
28 enum intel_nhm_id { C3 = 0, C6, PC3, PC6, TSC = 0xFFFF };
/* Forward declaration so the nhm_cstates[] table below can reference it. */
30 static int nhm_get_count_percent(unsigned int self_id, double *percent,
/*
 * Descriptor table for the four monitored C-states. Core states use the
 * default (per-core) range; package states are marked RANGE_PACKAGE.
 * All entries share nhm_get_count_percent as their readout callback.
 * NOTE(review): .name/.id fields are elided in this view — TODO confirm
 * they match the C3/C6/PC3/PC6 enum order.
 */
33 static cstate_t nhm_cstates[NHM_CSTATE_COUNT] = {
/* Core C3 residency, read from MSR_CORE_C3_RESIDENCY. */
36 .desc = N_("Processor Core C3"),
39 .get_count_percent = nhm_get_count_percent,
/* Core C6 residency, read from MSR_CORE_C6_RESIDENCY. */
43 .desc = N_("Processor Core C6"),
46 .get_count_percent = nhm_get_count_percent,
/* Package C3 residency — a package-wide counter, not per-core. */
51 .desc = N_("Processor Package C3"),
53 .range = RANGE_PACKAGE,
54 .get_count_percent = nhm_get_count_percent,
/* Package C6 residency — a package-wide counter, not per-core. */
58 .desc = N_("Processor Package C6"),
60 .range = RANGE_PACKAGE,
61 .get_count_percent = nhm_get_count_percent,
/* TSC snapshots bracketing the measurement interval (set in start/stop). */
65 static unsigned long long tsc_at_measure_start;
66 static unsigned long long tsc_at_measure_end;
/*
 * Per-state, per-CPU residency samples: [NHM_CSTATE_COUNT] arrays of
 * cpu_count entries each, allocated in intel_nhm_register().
 */
67 static unsigned long long *previous_count[NHM_CSTATE_COUNT];
68 static unsigned long long *current_count[NHM_CSTATE_COUNT];
69 /* valid flag for all CPUs. If a MSR read failed it will be zero */
/*
 * nhm_get_count - read the residency counter matching @id on @cpu into @val.
 * Maps the enum id to its MSR address, then performs the read.
 * Returns non-zero on failure (propagated from read_msr).
 * NOTE(review): the switch framing, the TSC case, and the final return are
 * elided in this view — presumably a switch (id) with a default error path.
 */
72 static int nhm_get_count(enum intel_nhm_id id, unsigned long long *val,
/* One MSR per counter id (C3/C6/PC3/PC6). */
79 msr = MSR_CORE_C3_RESIDENCY;
82 msr = MSR_CORE_C6_RESIDENCY;
85 msr = MSR_PKG_C3_RESIDENCY;
88 msr = MSR_PKG_C6_RESIDENCY;
/* A failed MSR read leaves this CPU's sample unusable for this interval. */
96 if (read_msr(cpu, msr, val))
/*
 * nhm_get_count_percent - residency of state @id on @cpu over the last
 * measurement interval, as a fraction of elapsed TSC ticks.
 * @percent = (current - previous residency) / (TSC end - TSC start);
 * presumably scaled by 100 on an elided line — TODO confirm.
 * NOTE(review): an elided guard likely checks is_valid[cpu] before this.
 */
102 static int nhm_get_count_percent(unsigned int id, double *percent,
111 (current_count[id][cpu] - previous_count[id][cpu])) /
112 (tsc_at_measure_end - tsc_at_measure_start);
/* Debug output only; no effect on the returned value. */
114 dprint("%s: previous: %llu - current: %llu - (%u)\n",
115 nhm_cstates[id].name, previous_count[id][cpu],
116 current_count[id][cpu], cpu);
118 dprint("%s: tsc_diff: %llu - count_diff: %llu - percent: %2.f (%u)\n",
119 nhm_cstates[id].name,
/* Cast binds only to tsc_at_measure_end; the subtraction is already ull. */
120 (unsigned long long) tsc_at_measure_end - tsc_at_measure_start,
121 current_count[id][cpu] - previous_count[id][cpu],
/*
 * nhm_start - snapshot all residency counters at the start of an interval.
 * Records the TSC (on CPU 0) and the previous_count baseline for every
 * state on every CPU.
 */
127 static int nhm_start(void)
130 unsigned long long dbg, val;
132 nhm_get_count(TSC, &tsc_at_measure_start, 0);
134 for (num = 0; num < NHM_CSTATE_COUNT; num++) {
135 for (cpu = 0; cpu < cpu_count; cpu++) {
/*
 * NOTE(review): is_valid[cpu] is reassigned on every iteration of the
 * outer num loop, so only the LAST counter id's read status survives.
 * Looks suspicious — a per-(state,cpu) flag or an accumulating update
 * seems intended; TODO confirm against upstream history.
 */
136 is_valid[cpu] = !nhm_get_count(num, &val, cpu);
137 previous_count[num][cpu] = val;
/* Second TSC read only to report how long the snapshot itself took. */
140 nhm_get_count(TSC, &dbg, 0);
141 dprint("TSC diff: %llu\n", dbg - tsc_at_measure_start);
/*
 * nhm_stop - snapshot all residency counters at the end of an interval.
 * Mirrors nhm_start(): records the end TSC and fills current_count so
 * nhm_get_count_percent() can compute deltas.
 */
145 static int nhm_stop(void)
147 unsigned long long val;
148 unsigned long long dbg;
151 nhm_get_count(TSC, &tsc_at_measure_end, 0);
153 for (num = 0; num < NHM_CSTATE_COUNT; num++) {
154 for (cpu = 0; cpu < cpu_count; cpu++) {
/*
 * NOTE(review): same issue as nhm_start — is_valid[cpu] keeps only the
 * last counter id's read status; TODO confirm intended semantics.
 */
155 is_valid[cpu] = !nhm_get_count(num, &val, cpu);
156 current_count[num][cpu] = val;
/* Debug-only: measure the cost of the snapshot loop itself. */
159 nhm_get_count(TSC, &dbg, 0);
160 dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end);
/* Forward declaration; the monitor is defined at the bottom of the file. */
165 struct cpuidle_monitor intel_nhm_monitor;
/*
 * intel_nhm_register - probe for support and allocate per-CPU buffers.
 * Returns &intel_nhm_monitor on success; the elided early-out paths
 * presumably return NULL when a capability check fails.
 * Requires an Intel CPU with an invariant TSC and APERF/MPERF support.
 */
167 struct cpuidle_monitor *intel_nhm_register(void)
171 if (cpupower_cpu_info.vendor != X86_VENDOR_INTEL)
/* Invariant TSC is needed so TSC deltas are a valid time base. */
174 if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC))
177 if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF))
180 /* Free this at program termination */
/*
 * NOTE(review): none of the calloc() results below are checked; a failed
 * allocation would be dereferenced later in nhm_start()/nhm_stop().
 */
181 is_valid = calloc(cpu_count, sizeof(int));
182 for (num = 0; num < NHM_CSTATE_COUNT; num++) {
183 previous_count[num] = calloc(cpu_count,
184 sizeof(unsigned long long));
185 current_count[num] = calloc(cpu_count,
186 sizeof(unsigned long long));
/* Cache the display width of the monitor name for column formatting. */
189 intel_nhm_monitor.name_len = strlen(intel_nhm_monitor.name);
190 return &intel_nhm_monitor;
/*
 * intel_nhm_unregister - release the buffers allocated in register().
 * NOTE(review): freeing of is_valid is not visible in this view — TODO
 * confirm it is freed on an elided line.
 */
193 void intel_nhm_unregister(void)
197 for (num = 0; num < NHM_CSTATE_COUNT; num++) {
198 free(previous_count[num]);
199 free(current_count[num]);
/*
 * Monitor descriptor exported to the cpupower-monitor framework: the
 * C-state table above plus the register/unregister hooks. The elided
 * fields presumably set .name and the start/stop callbacks.
 */
204 struct cpuidle_monitor intel_nhm_monitor = {
206 .hw_states_num = NHM_CSTATE_COUNT,
207 .hw_states = nhm_cstates,
210 .do_register = intel_nhm_register,
211 .unregister = intel_nhm_unregister,
/* Interval cap before the 64-bit TSC-rate counters could wrap. */
213 .overflow_s = 922000000 /* 922337203 seconds TSC overflow