GNU Linux-libre 4.4.289-gnu1
drivers/acpi/processor_perflib.c
/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_perflib");

static DEFINE_MUTEX(performance_mutex);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * each time a CPUfreq driver that is also registered with the
 * ACPI core is asked to change the speed policy, the maximum
 * frequency is adjusted so that it stays within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

/* ignore_ppc:
 * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
 *       ignore _PPC
 *  0 -> cpufreq low level drivers initialized -> consider _PPC values
 *  1 -> ignore _PPC totally -> forced by user through boot param
 */
static int ignore_ppc = -1;
module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
                 " limited by BIOS, this should help");
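
/*
 * Usage example (assuming this file is built into the usual "processor"
 * module): booting with "processor.ignore_ppc=1" forces _PPC to be ignored,
 * and the 0644 mode above also exposes the knob read-write at
 * /sys/module/processor/parameters/ignore_ppc.
 */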

#define PPC_REGISTERED   1
#define PPC_IN_USE       2

static int acpi_processor_ppc_status;

static int acpi_processor_ppc_notifier(struct notifier_block *nb,
                                       unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        struct acpi_processor *pr;
        unsigned int ppc = 0;

        if (event == CPUFREQ_START && ignore_ppc <= 0) {
                ignore_ppc = 0;
                return 0;
        }

        if (ignore_ppc)
                return 0;

        if (event != CPUFREQ_ADJUST)
                return 0;

        mutex_lock(&performance_mutex);

        pr = per_cpu(processors, policy->cpu);
        if (!pr || !pr->performance)
                goto out;

        ppc = (unsigned int)pr->performance_platform_limit;

        if (ppc >= pr->performance->state_count)
                goto out;

        cpufreq_verify_within_limits(policy, 0,
                                     pr->performance->states[ppc].
                                     core_frequency * 1000);

out:
        mutex_unlock(&performance_mutex);

        return 0;
}
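
/*
 * Worked example of the clamping above: if _PPC evaluates to 2 and
 * states[2].core_frequency is 1600 (MHz), policy->max is limited to
 * 1600 * 1000 = 1600000 kHz, leaving P-states 0 and 1 unused until the
 * platform raises the limit again.
 */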

static struct notifier_block acpi_ppc_notifier_block = {
        .notifier_call = acpi_processor_ppc_notifier,
};

static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
        acpi_status status = 0;
        unsigned long long ppc = 0;

        if (!pr)
                return -EINVAL;

        /*
         * _PPC indicates the maximum state currently supported by the
         * platform (e.g. 0 = states 0..n; 1 = states 1..n; etc.).
         */
        status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

        if (status != AE_NOT_FOUND)
                acpi_processor_ppc_status |= PPC_IN_USE;

        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
                return -ENODEV;
        }

        pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
                       (int)ppc, ppc ? "" : "not");

        pr->performance_platform_limit = (int)ppc;

        return 0;
}

#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE       0x80
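/*
 * 0x80 is the ACPI-defined Notify value for a performance change on a
 * processor object; it is echoed back to the platform as the "source event"
 * argument when _OST is evaluated below.
 */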
/*
 * acpi_processor_ppc_ost: Notify firmware of the _PPC evaluation status
 * @handle: ACPI processor handle
 * @status: the status code of _PPC evaluation
 *      0: success. OSPM is now using the performance state specified.
 *      1: failure. OSPM has not changed the number of P-states in use.
 */
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
        if (acpi_has_method(handle, "_OST"))
                acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
                                  status, NULL);
}

int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
        int ret;

        if (ignore_ppc || !pr->performance) {
                /*
                 * The _OST object is evaluated only for a notification
                 * event; otherwise it is skipped.
                 */
                if (event_flag)
                        acpi_processor_ppc_ost(pr->handle, 1);
                return 0;
        }

        ret = acpi_processor_get_platform_limit(pr);
        /*
         * The _OST object is evaluated only for a notification event;
         * otherwise it is skipped.
         */
        if (event_flag) {
                if (ret < 0)
                        acpi_processor_ppc_ost(pr->handle, 1);
                else
                        acpi_processor_ppc_ost(pr->handle, 0);
        }
        if (ret < 0)
                return ret;
        else
                return cpufreq_update_policy(pr->id);
}

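/*
 * Returns the current platform frequency ceiling for @cpu in kHz: the _PSS
 * core_frequency (MHz) of the _PPC-limited state, times 1000.
 */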
int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
        struct acpi_processor *pr;

        pr = per_cpu(processors, cpu);
        if (!pr || !pr->performance || !pr->performance->state_count)
                return -ENODEV;
        *limit = pr->performance->states[pr->performance_platform_limit].
                core_frequency * 1000;
        return 0;
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);

void acpi_processor_ppc_init(void)
{
        if (!cpufreq_register_notifier
            (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
                acpi_processor_ppc_status |= PPC_REGISTERED;
        else
                printk(KERN_DEBUG
                       "Warning: Processor Platform Limit not supported.\n");
}

void acpi_processor_ppc_exit(void)
{
        if (acpi_processor_ppc_status & PPC_REGISTERED)
                cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);

        acpi_processor_ppc_status &= ~PPC_REGISTERED;
}

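/*
 * For illustration only, a typical _PCT shape in ASL (not taken from any
 * particular firmware): a two-element package of Register() buffers,
 * control register first, status register second, e.g.
 *
 *     Name (_PCT, Package () {
 *         ResourceTemplate () { Register (FFixedHW, 0, 0, 0) },
 *         ResourceTemplate () { Register (FFixedHW, 0, 0, 0) }
 *     })
 *
 * Each buffer is copied below into a struct acpi_pct_register.
 */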
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *pct = NULL;
        union acpi_object obj = { 0 };

        status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
                return -ENODEV;
        }

        pct = (union acpi_object *)buffer.pointer;
        if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
            || (pct->package.count != 2)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data\n");
                result = -EFAULT;
                goto end;
        }

        /*
         * control_register
         */

        obj = pct->package.elements[0];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
                result = -EFAULT;
                goto end;
        }
        memcpy(&pr->performance->control_register, obj.buffer.pointer,
               sizeof(struct acpi_pct_register));

        /*
         * status_register
         */

        obj = pct->package.elements[1];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
                result = -EFAULT;
                goto end;
        }

        memcpy(&pr->performance->status_register, obj.buffer.pointer,
               sizeof(struct acpi_pct_register));

end:
        kfree(buffer.pointer);

        return result;
}

#ifdef CONFIG_X86
/*
 * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
 * in their ACPI data. Calculate the real values and fix up the _PSS data.
 */
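/*
 * Worked example for family 0x10: fid == 0x0b, did == 1 read back from the
 * P-state MSR gives (100 * (0x0b + 0x10)) >> 1 = 2700 / 2 = 1350 MHz, a
 * 50 MHz multiple that the BIOS may have reported as a rounded value in
 * _PSS.
 */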
static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
{
        u32 hi, lo, fid, did;
        int index = px->control & 0x00000007;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                return;

        if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
            || boot_cpu_data.x86 == 0x11) {
                rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
                /*
                 * MSR C001_0064+:
                 * Bit 63 (bit 31 of the high dword read here): PstateEn.
                 * Read-write. If set, the P-state is valid.
                 */
                if (!(hi & BIT(31)))
                        return;

                fid = lo & 0x3f;
                did = (lo >> 6) & 7;
                if (boot_cpu_data.x86 == 0x10)
                        px->core_frequency = (100 * (fid + 0x10)) >> did;
                else
                        px->core_frequency = (100 * (fid + 8)) >> did;
        }
}
#else
static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {}
#endif

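/*
 * For reference, each _PSS entry is a six-integer package matching the
 * "NNNNNN" format used below: core frequency (MHz), power (mW), transition
 * latency (us), bus-master latency (us), control value, status value.
 * A hypothetical 2 GHz / 90 W state might look like:
 *
 *     Package () { 2000, 90000, 500, 300, 0x0202, 0x0202 }
 */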
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *pss = NULL;
        int i;
        int last_invalid = -1;

        status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
                return -ENODEV;
        }

        pss = buffer.pointer;
        if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
                printk(KERN_ERR PREFIX "Invalid _PSS data\n");
                result = -EFAULT;
                goto end;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
                          pss->package.count));

        pr->performance->state_count = pss->package.count;
        pr->performance->states =
            kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
                    GFP_KERNEL);
        if (!pr->performance->states) {
                result = -ENOMEM;
                goto end;
        }

        for (i = 0; i < pr->performance->state_count; i++) {

                struct acpi_processor_px *px = &(pr->performance->states[i]);

                state.length = sizeof(struct acpi_processor_px);
                state.pointer = px;

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

                status = acpi_extract_package(&(pss->package.elements[i]),
                                              &format, &state);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
                        result = -EFAULT;
                        kfree(pr->performance->states);
                        goto end;
                }

                amd_fixup_frequency(px, i);

                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
                                  i,
                                  (u32) px->core_frequency,
                                  (u32) px->power,
                                  (u32) px->transition_latency,
                                  (u32) px->bus_master_latency,
                                  (u32) px->control, (u32) px->status));

                /*
                 * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
                 */
                if (!px->core_frequency ||
                    ((u32)(px->core_frequency * 1000) !=
                     (px->core_frequency * 1000))) {
                        printk(KERN_ERR FW_BUG PREFIX
                               "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
                               pr->id, px->core_frequency);
                        if (last_invalid == -1)
                                last_invalid = i;
                } else {
                        if (last_invalid != -1) {
                                /*
                                 * Copy this valid entry over last_invalid entry
                                 */
                                memcpy(&(pr->performance->states[last_invalid]),
                                       px, sizeof(struct acpi_processor_px));
                                ++last_invalid;
                        }
                }
        }

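        /*
         * The loop above compacts the state table: every valid entry seen
         * after an invalid one is copied down over the oldest invalid slot,
         * so last_invalid ends up equal to the number of usable states.
         */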
        if (last_invalid == 0) {
                printk(KERN_ERR FW_BUG PREFIX
                       "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
                result = -EFAULT;
                kfree(pr->performance->states);
                pr->performance->states = NULL;
        }

        if (last_invalid > 0)
                pr->performance->state_count = last_invalid;

end:
        kfree(buffer.pointer);

        return result;
}

int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
        int result = 0;

        if (!pr || !pr->performance || !pr->handle)
                return -EINVAL;

        if (!acpi_has_method(pr->handle, "_PCT")) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "ACPI-based processor performance control unavailable\n"));
                return -ENODEV;
        }

        result = acpi_processor_get_performance_control(pr);
        if (result)
                goto update_bios;

        result = acpi_processor_get_performance_states(pr);
        if (result)
                goto update_bios;

        /* We need to call _PPC once when cpufreq starts */
        if (ignore_ppc != 1)
                result = acpi_processor_get_platform_limit(pr);

        return result;

        /*
         * Having _PPC but missing frequencies (_PSS, _PCT) is a very good
         * hint that the BIOS is older than the CPU and does not know its
         * frequencies.
         */
 update_bios:
#ifdef CONFIG_X86
        if (acpi_has_method(pr->handle, "_PPC")) {
                if (boot_cpu_has(X86_FEATURE_EST))
                        printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
                               "frequency support\n");
        }
#endif
        return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);

int acpi_processor_notify_smm(struct module *calling_module)
{
        acpi_status status;
        static int is_done = 0;

        if (!(acpi_processor_ppc_status & PPC_REGISTERED))
                return -EBUSY;

        if (!try_module_get(calling_module))
                return -EINVAL;

        /* is_done is set to negative if an error occurred,
         * and to positive if _no_ error occurred, but SMM
         * was already notified. This avoids double notification
         * which might lead to unexpected results...
         */
        if (is_done > 0) {
                module_put(calling_module);
                return 0;
        } else if (is_done < 0) {
                module_put(calling_module);
                return is_done;
        }

        is_done = -EIO;

        /* Can't write pstate_control to smi_command if either value is zero */
        if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
                module_put(calling_module);
                return 0;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
                          acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));

        status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
                                    (u32) acpi_gbl_FADT.pstate_control, 8);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
                                "Failed to write pstate_control [0x%x] to "
                                "smi_command [0x%x]", acpi_gbl_FADT.pstate_control,
                                acpi_gbl_FADT.smi_command));
                module_put(calling_module);
                return status;
        }

        /* Success. If there is no _PPC, we have nothing to fear, so the
         * cpufreq driver can safely be rmmod'ed. */
        is_done = 1;

        if (!(acpi_processor_ppc_status & PPC_IN_USE))
                module_put(calling_module);

        return 0;
}
EXPORT_SYMBOL(acpi_processor_notify_smm);

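/*
 * _PSD returns a package holding a single five-integer sub-package, matching
 * the "NNNNN" format below: num_entries (5), revision (0), domain number,
 * coordination type, and the number of processors in the domain.
 */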
static int acpi_processor_get_psd(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
        struct acpi_buffer state = {0, NULL};
        union acpi_object  *psd = NULL;
        struct acpi_psd_package *pdomain;

        status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return -ENODEV;

        psd = buffer.pointer;
        if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
                printk(KERN_ERR PREFIX "Invalid _PSD data\n");
                result = -EFAULT;
                goto end;
        }

        if (psd->package.count != 1) {
                printk(KERN_ERR PREFIX "Invalid _PSD data\n");
                result = -EFAULT;
                goto end;
        }

        pdomain = &(pr->performance->domain_info);

        state.length = sizeof(struct acpi_psd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(psd->package.elements[0]),
                &format, &state);
        if (ACPI_FAILURE(status)) {
                printk(KERN_ERR PREFIX "Invalid _PSD data\n");
                result = -EFAULT;
                goto end;
        }

        if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
                printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
                result = -EFAULT;
                goto end;
        }

        if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
                printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
                result = -EFAULT;
                goto end;
        }

        if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
                printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n");
                result = -EFAULT;
                goto end;
        }
end:
        kfree(buffer.pointer);
        return result;
}

int acpi_processor_preregister_performance(
                struct acpi_processor_performance __percpu *performance)
{
        int count_target;
        int retval = 0;
        unsigned int i, j;
        cpumask_var_t covered_cpus;
        struct acpi_processor *pr;
        struct acpi_psd_package *pdomain;
        struct acpi_processor *match_pr;
        struct acpi_psd_package *match_pdomain;

        if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
                return -ENOMEM;

        mutex_lock(&performance_mutex);

        /*
         * Check if another driver has already registered, and abort before
         * changing pr->performance if it has. Check input data as well.
         */
        for_each_possible_cpu(i) {
                pr = per_cpu(processors, i);
                if (!pr) {
                        /* Look only at processors in ACPI namespace */
                        continue;
                }

                if (pr->performance) {
                        retval = -EBUSY;
                        goto err_out;
                }

                if (!performance || !per_cpu_ptr(performance, i)) {
                        retval = -EINVAL;
                        goto err_out;
                }
        }

        /* Call _PSD for all CPUs */
        for_each_possible_cpu(i) {
                pr = per_cpu(processors, i);
                if (!pr)
                        continue;

                pr->performance = per_cpu_ptr(performance, i);
                cpumask_set_cpu(i, pr->performance->shared_cpu_map);
                if (acpi_processor_get_psd(pr)) {
                        retval = -EINVAL;
                        continue;
                }
        }
        if (retval)
                goto err_ret;

        /*
         * Now that we have _PSD data from all CPUs, let's set up the
         * P-state domain info.
         */
        for_each_possible_cpu(i) {
                pr = per_cpu(processors, i);
                if (!pr)
                        continue;

                if (cpumask_test_cpu(i, covered_cpus))
                        continue;

                pdomain = &(pr->performance->domain_info);
                cpumask_set_cpu(i, pr->performance->shared_cpu_map);
                cpumask_set_cpu(i, covered_cpus);
                if (pdomain->num_processors <= 1)
                        continue;

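                /*
                 * Coordination semantics, briefly: SW_ALL means the OS must
                 * initiate the transition on every CPU in the domain, SW_ANY
                 * means a transition initiated on any one CPU affects all of
                 * them, and HW_ALL means the hardware coordinates, so the OS
                 * may request P-states on each CPU independently.
                 */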
                /* Validate the Domain info */
                count_target = pdomain->num_processors;
                if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = per_cpu(processors, j);
                        if (!match_pr)
                                continue;

                        match_pdomain = &(match_pr->performance->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        /* Here i and j are in the same domain */

                        if (match_pdomain->num_processors != count_target) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        if (pdomain->coord_type != match_pdomain->coord_type) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        cpumask_set_cpu(j, covered_cpus);
                        cpumask_set_cpu(j, pr->performance->shared_cpu_map);
                }

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = per_cpu(processors, j);
                        if (!match_pr)
                                continue;

                        match_pdomain = &(match_pr->performance->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        match_pr->performance->shared_type =
                                        pr->performance->shared_type;
                        cpumask_copy(match_pr->performance->shared_cpu_map,
                                     pr->performance->shared_cpu_map);
                }
        }

err_ret:
        for_each_possible_cpu(i) {
                pr = per_cpu(processors, i);
                if (!pr || !pr->performance)
                        continue;

                /* Assume no coordination on any error parsing domain info */
                if (retval) {
                        cpumask_clear(pr->performance->shared_cpu_map);
                        cpumask_set_cpu(i, pr->performance->shared_cpu_map);
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                }
                pr->performance = NULL; /* Will be set for real in register */
        }

err_out:
        mutex_unlock(&performance_mutex);
        free_cpumask_var(covered_cpus);
        return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);

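/*
 * Typical caller: a cpufreq scaling driver (acpi-cpufreq, for instance)
 * invokes this from its ->init() callback with a performance structure it
 * owns, then reads back the filled-in state table to build its cpufreq
 * frequency table.
 */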
int
acpi_processor_register_performance(struct acpi_processor_performance
                                    *performance, unsigned int cpu)
{
        struct acpi_processor *pr;

        if (!(acpi_processor_ppc_status & PPC_REGISTERED))
                return -EINVAL;

        mutex_lock(&performance_mutex);

        pr = per_cpu(processors, cpu);
        if (!pr) {
                mutex_unlock(&performance_mutex);
                return -ENODEV;
        }

        if (pr->performance) {
                mutex_unlock(&performance_mutex);
                return -EBUSY;
        }

        WARN_ON(!performance);

        pr->performance = performance;

        if (acpi_processor_get_performance_info(pr)) {
                pr->performance = NULL;
                mutex_unlock(&performance_mutex);
                return -EIO;
        }

        mutex_unlock(&performance_mutex);
        return 0;
}
EXPORT_SYMBOL(acpi_processor_register_performance);

void acpi_processor_unregister_performance(unsigned int cpu)
{
        struct acpi_processor *pr;

        mutex_lock(&performance_mutex);

        pr = per_cpu(processors, cpu);
        if (!pr) {
                mutex_unlock(&performance_mutex);
                return;
        }

        if (pr->performance)
                kfree(pr->performance->states);
        pr->performance = NULL;

        mutex_unlock(&performance_mutex);
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);