/*
 * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
 *
 * Licensed under the terms of the GNU GPL License version 2.
 *
 * Based on SandyBridge monitor. Implements the new package C-states
 * (PC8, PC9, PC10) coming with a specific Haswell (family 0x45) CPU.
 */
#if defined(__i386__) || defined(__x86_64__)

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "helpers/helpers.h"
#include "idle_monitor/cpupower-monitor.h"

/* Package C-state residency counter MSRs (Intel SDM Vol. 4). */
#define MSR_PKG_C8_RESIDENCY	0x00000630
#define MSR_PKG_C9_RESIDENCY	0x00000631
#define MSR_PKG_C10_RESIDENCY	0x00000632

/* Time-stamp counter MSR, used as the reference clock for percentages. */
#define MSR_TSC			0x10
26 enum intel_hsw_ext_id { PC8 = 0, PC9, PC10, HSW_EXT_CSTATE_COUNT,
29 static int hsw_ext_get_count_percent(unsigned int self_id, double *percent,
32 static cstate_t hsw_ext_cstates[HSW_EXT_CSTATE_COUNT] = {
35 .desc = N_("Processor Package C8"),
37 .range = RANGE_PACKAGE,
38 .get_count_percent = hsw_ext_get_count_percent,
42 .desc = N_("Processor Package C9"),
44 .range = RANGE_PACKAGE,
45 .get_count_percent = hsw_ext_get_count_percent,
49 .desc = N_("Processor Package C10"),
51 .range = RANGE_PACKAGE,
52 .get_count_percent = hsw_ext_get_count_percent,
56 static unsigned long long tsc_at_measure_start;
57 static unsigned long long tsc_at_measure_end;
58 static unsigned long long *previous_count[HSW_EXT_CSTATE_COUNT];
59 static unsigned long long *current_count[HSW_EXT_CSTATE_COUNT];
60 /* valid flag for all CPUs. If a MSR read failed it will be zero */
63 static int hsw_ext_get_count(enum intel_hsw_ext_id id, unsigned long long *val,
70 msr = MSR_PKG_C8_RESIDENCY;
73 msr = MSR_PKG_C9_RESIDENCY;
76 msr = MSR_PKG_C10_RESIDENCY;
84 if (read_msr(cpu, msr, val))
89 static int hsw_ext_get_count_percent(unsigned int id, double *percent,
98 (current_count[id][cpu] - previous_count[id][cpu])) /
99 (tsc_at_measure_end - tsc_at_measure_start);
101 dprint("%s: previous: %llu - current: %llu - (%u)\n",
102 hsw_ext_cstates[id].name, previous_count[id][cpu],
103 current_count[id][cpu], cpu);
105 dprint("%s: tsc_diff: %llu - count_diff: %llu - percent: %2.f (%u)\n",
106 hsw_ext_cstates[id].name,
107 (unsigned long long) tsc_at_measure_end - tsc_at_measure_start,
108 current_count[id][cpu] - previous_count[id][cpu],
114 static int hsw_ext_start(void)
117 unsigned long long val;
119 for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) {
120 for (cpu = 0; cpu < cpu_count; cpu++) {
121 hsw_ext_get_count(num, &val, cpu);
122 previous_count[num][cpu] = val;
125 hsw_ext_get_count(TSC, &tsc_at_measure_start, 0);
129 static int hsw_ext_stop(void)
131 unsigned long long val;
134 hsw_ext_get_count(TSC, &tsc_at_measure_end, 0);
136 for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) {
137 for (cpu = 0; cpu < cpu_count; cpu++) {
138 is_valid[cpu] = !hsw_ext_get_count(num, &val, cpu);
139 current_count[num][cpu] = val;
145 struct cpuidle_monitor intel_hsw_ext_monitor;
147 static struct cpuidle_monitor *hsw_ext_register(void)
151 if (cpupower_cpu_info.vendor != X86_VENDOR_INTEL
152 || cpupower_cpu_info.family != 6)
155 switch (cpupower_cpu_info.model) {
162 is_valid = calloc(cpu_count, sizeof(int));
163 for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) {
164 previous_count[num] = calloc(cpu_count,
165 sizeof(unsigned long long));
166 current_count[num] = calloc(cpu_count,
167 sizeof(unsigned long long));
169 intel_hsw_ext_monitor.name_len = strlen(intel_hsw_ext_monitor.name);
170 return &intel_hsw_ext_monitor;
173 void hsw_ext_unregister(void)
177 for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) {
178 free(previous_count[num]);
179 free(current_count[num]);
183 struct cpuidle_monitor intel_hsw_ext_monitor = {
184 .name = "HaswellExtended",
185 .hw_states = hsw_ext_cstates,
186 .hw_states_num = HSW_EXT_CSTATE_COUNT,
187 .start = hsw_ext_start,
188 .stop = hsw_ext_stop,
189 .do_register = hsw_ext_register,
190 .unregister = hsw_ext_unregister,
192 .overflow_s = 922000000 /* 922337203 seconds TSC overflow
195 #endif /* defined(__i386__) || defined(__x86_64__) */