/*
 * spu aware cpufreq governor for the cell processor
 *
 * © Copyright IBM Corporation 2006-2008
 *
 * Author: Christian Krafft <krafft@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/cpufreq.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <asm/machdep.h>
#include <asm/spu.h>

#define POLL_TIME	100000		/* in µs */
#define EXP		753		/* exp(-1) in fixed-point */

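/*
 * Per-CPU governor state: a fixed-point decaying average of busy SPUs
 * in the CPU's node, the cpufreq policy being governed, and the
 * polling work item that periodically re-evaluates the target frequency.
 */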
struct spu_gov_info_struct {
	unsigned long busy_spus;	/* fixed-point */
	struct cpufreq_policy *policy;
	struct delayed_work work;
	unsigned int poll_int;		/* µs */
};
static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);

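/*
 * Fold the current count of busy SPUs in this CPU's node into a
 * decaying average (EXP = exp(-1) in fixed-point, as in the loadavg
 * code) and scale the policy's maximum frequency by that average.
 */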
static int calc_freq(struct spu_gov_info_struct *info)
{
	int cpu;
	int busy_spus;

	cpu = info->policy->cpu;
	busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus);

	CALC_LOAD(info->busy_spus, EXP, busy_spus * FIXED_1);
	pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n",
			cpu, busy_spus, info->busy_spus);

	return info->policy->max * info->busy_spus / FIXED_1;
}

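/*
 * Periodic worker: pick a new target frequency from the SPU load and
 * re-arm itself on the policy CPU until the governor is stopped.
 */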
static void spu_gov_work(struct work_struct *work)
{
	struct spu_gov_info_struct *info;
	int delay;
	unsigned long target_freq;

	info = container_of(work, struct spu_gov_info_struct, work.work);

	/* after cancel_delayed_work_sync we unset info->policy */
	BUG_ON(info->policy == NULL);

	target_freq = calc_freq(info);
	__cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);

	delay = usecs_to_jiffies(info->poll_int);
	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
}

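/* Start the deferrable polling work on the CPU that owns the policy. */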
static void spu_gov_init_work(struct spu_gov_info_struct *info)
{
	int delay = usecs_to_jiffies(info->poll_int);
	INIT_DEFERRABLE_WORK(&info->work, spu_gov_work);
	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
}

static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
{
	cancel_delayed_work_sync(&info->work);
}

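/*
 * Point every CPU covered by the policy at this policy and kick off
 * polling on the policy CPU.  policy->cur must already be valid.
 */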
static int spu_gov_start(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
	struct spu_gov_info_struct *affected_info;
	int i;

	if (!cpu_online(cpu)) {
		printk(KERN_ERR "cpu %d is not online\n", cpu);
		return -EINVAL;
	}

	if (!policy->cur) {
		printk(KERN_ERR "no cpu specified in policy\n");
		return -EINVAL;
	}

	/* initialize spu_gov_info for all affected cpus */
	for_each_cpu(i, policy->cpus) {
		affected_info = &per_cpu(spu_gov_info, i);
		affected_info->policy = policy;
	}

	info->poll_int = POLL_TIME;

	/* setup timer */
	spu_gov_init_work(info);

	return 0;
}

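/*
 * Cancel the polling work, then clear the policy pointer for every CPU
 * the policy covered.
 */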
static void spu_gov_stop(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
	int i;

	/* cancel timer */
	spu_gov_cancel_work(info);

	/* clean spu_gov_info for all affected cpus */
	for_each_cpu(i, policy->cpus) {
		info = &per_cpu(spu_gov_info, i);
		info->policy = NULL;
	}
}

static struct cpufreq_governor spu_governor = {
	.name = "spudemand",
	.start = spu_gov_start,
	.stop = spu_gov_stop,
	.owner = THIS_MODULE,
};

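/*
 * Once the module is loaded and the governor registered, it can be
 * selected per policy through the usual cpufreq sysfs interface, e.g.
 * (assuming the standard sysfs layout):
 *
 *   echo spudemand > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 */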
/*
 * module init and destroy
 */

static int __init spu_gov_init(void)
{
	int ret;

	ret = cpufreq_register_governor(&spu_governor);
	if (ret)
		printk(KERN_ERR "registration of governor failed\n");
	return ret;
}

static void __exit spu_gov_exit(void)
{
	cpufreq_unregister_governor(&spu_governor);
}

module_init(spu_gov_init);
module_exit(spu_gov_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");