2 * Copyright 2013 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <subdev/bios.h>
27 #include <subdev/bios/boost.h>
28 #include <subdev/bios/cstep.h>
29 #include <subdev/bios/perf.h>
30 #include <subdev/bios/vpstate.h>
31 #include <subdev/fb.h>
32 #include <subdev/therm.h>
33 #include <subdev/volt.h>
35 #include <core/option.h>
37 /******************************************************************************
39 *****************************************************************************/
/*
 * nvkm_clk_adjust() - clamp (and optionally scale) a frequency against
 * the VBIOS boost table entry for the given pstate/domain.
 *
 * NOTE(review): gaps in the embedded line numbers (43, 47-48, 50, ...)
 * show lines missing from this excerpt (braces, loop header, return).
 */
41 nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
42 u8 pstate, u8 domain, u32 input)
44 struct nvkm_bios *bios = clk->subdev.device->bios;
45 struct nvbios_boostE boostE;
46 u8 ver, hdr, cnt, len;
/* Look up the boost table entry matching this performance state. */
49 data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
51 struct nvbios_boostS boostS;
52 u8 idx = 0, sver, shdr;
/* Clamp the requested frequency into the entry's [min, max] window. */
55 input = max(boostE.min, input);
56 input = min(boostE.max, input);
/* Walk the subentries looking for the one matching @domain. */
60 subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
62 if (subd && boostS.domain == domain) {
/* Scale by the subentry percentage (presumably gated on @adjust;
 * the conditional line is missing here — TODO confirm), then
 * re-clamp to the subentry's own range. */
64 input = input * boostS.percent / 100;
65 input = max(boostS.min, input);
66 input = min(boostS.max, input);
75 /******************************************************************************
77 *****************************************************************************/
/*
 * nvkm_cstate_valid() - decide whether a cstate may be selected under
 * the current voltage limit, temperature and configured boost mode.
 *
 * Returns true when the cstate's mapped voltage fits under
 * min(@max_volt, volt->max_uv). NOTE(review): lines are missing from
 * this excerpt (embedded numbering gaps) — e.g. the "return false"
 * bodies of the boost checks are not visible.
 */
79 nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
80 u32 max_volt, int temp)
82 const struct nvkm_domain *domain = clk->domains;
83 struct nvkm_volt *volt = clk->subdev.device->volt;
/* Check every clock domain flagged as vpstate-limited. */
86 while (domain && domain->name != nv_clk_src_max) {
87 if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) {
88 u32 freq = cstate->domain[domain->name];
89 switch (clk->boost_mode) {
/* Boost disabled: frequencies above the base clock are rejected. */
90 case NVKM_CLK_BOOST_NONE:
91 if (clk->base_khz && freq > clk->base_khz)
/* BIOS boost mode: cap at the VBIOS boost clock instead. */
93 case NVKM_CLK_BOOST_BIOS:
94 if (clk->boost_khz && freq > clk->boost_khz)
/* Translate the cstate's voltage id at this temperature and compare
 * against the strictest of the two voltage ceilings. */
104 voltage = nvkm_volt_map(volt, cstate->voltage, temp);
107 return voltage <= min(max_volt, volt->max_uv);
/*
 * nvkm_cstate_find_best() - starting from @start, walk the pstate's
 * cstate list towards lower states and return the first cstate that is
 * valid for the current voltage/temperature limits.
 *
 * NOTE(review): embedded numbering gaps show missing lines (early
 * return value, final return) in this excerpt.
 */
110 static struct nvkm_cstate *
111 nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
112 struct nvkm_cstate *start)
114 struct nvkm_device *device = clk->subdev.device;
115 struct nvkm_volt *volt = device->volt;
116 struct nvkm_cstate *cstate;
/* Nothing to search without a pstate or a starting cstate. */
119 if (!pstate || !start)
/* Compute the effective voltage ceiling: the regulator max, further
 * limited by each of the max0/max1/max2 voltage-map entries when
 * present (0xff means "no entry"). */
125 max_volt = volt->max_uv;
126 if (volt->max0_id != 0xff)
127 max_volt = min(max_volt,
128 nvkm_volt_map(volt, volt->max0_id, clk->temp));
129 if (volt->max1_id != 0xff)
130 max_volt = min(max_volt,
131 nvkm_volt_map(volt, volt->max1_id, clk->temp));
132 if (volt->max2_id != 0xff)
133 max_volt = min(max_volt,
134 nvkm_volt_map(volt, volt->max2_id, clk->temp));
/* Iterate backwards (towards lower cstates) until one validates. */
136 for (cstate = start; &cstate->head != &pstate->list;
137 cstate = list_entry(cstate->head.prev, typeof(*cstate), head)) {
138 if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
/*
 * nvkm_cstate_get() - look up a cstate of @pstate by id.
 *
 * NVKM_CLK_CSTATE_HIGHEST short-circuits to the last (highest) entry;
 * otherwise the list is searched for a matching cstate->id.
 * NOTE(review): the found/not-found return lines are missing from this
 * excerpt (embedded numbering gaps).
 */
145 static struct nvkm_cstate *
146 nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
148 struct nvkm_cstate *cstate;
149 if (cstatei == NVKM_CLK_CSTATE_HIGHEST)
150 return list_last_entry(&pstate->list, typeof(*cstate), head);
152 list_for_each_entry(cstate, &pstate->list, head) {
153 if (cstate->id == cstatei)
/*
 * nvkm_cstate_prog() - program the hardware to a given cstate of
 * @pstate, sequencing the dependent subdevs safely:
 *   raise fan speed -> raise voltage -> reprogram clocks ->
 *   lower voltage -> lower fan speed.
 * -ENODEV from therm/volt is tolerated (subdev not present).
 * NOTE(review): embedded numbering gaps show missing lines (error
 * returns, braces) in this excerpt.
 */
161 nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
163 struct nvkm_subdev *subdev = &clk->subdev;
164 struct nvkm_device *device = subdev->device;
165 struct nvkm_therm *therm = device->therm;
166 struct nvkm_volt *volt = device->volt;
167 struct nvkm_cstate *cstate;
/* Pick the requested cstate, then fall back to the best one that is
 * valid under current voltage/thermal limits; with no cstates, use the
 * pstate's base clocks. */
170 if (!list_empty(&pstate->list)) {
171 cstate = nvkm_cstate_get(clk, pstate, cstatei);
172 cstate = nvkm_cstate_find_best(clk, pstate, cstate);
176 cstate = &pstate->base;
/* Raise the fan first so cooling is in place before clocks go up. */
180 ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
181 if (ret && ret != -ENODEV) {
182 nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
/* Raise the voltage (+1 direction) before increasing clocks. */
188 ret = nvkm_volt_set_id(volt, cstate->voltage,
189 pstate->base.voltage, clk->temp, +1);
190 if (ret && ret != -ENODEV) {
191 nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
/* Compute and commit the new clock configuration. */
196 ret = clk->func->calc(clk, cstate);
198 ret = clk->func->prog(clk);
199 clk->func->tidy(clk);
/* Now safe to drop the voltage (-1 direction) ... */
203 ret = nvkm_volt_set_id(volt, cstate->voltage,
204 pstate->base.voltage, clk->temp, -1);
205 if (ret && ret != -ENODEV)
206 nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
/* ... and finally relax the fan speed. */
210 ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
211 if (ret && ret != -ENODEV)
212 nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
/* nvkm_cstate_del() - unlink a cstate from its pstate's list (the
 * kfree() line is missing from this excerpt — embedded numbering gap). */
219 nvkm_cstate_del(struct nvkm_cstate *cstate)
221 list_del(&cstate->head);
/*
 * nvkm_cstate_new() - build a cstate from VBIOS cstep-X entry @idx and
 * append it to @pstate's list.
 *
 * The new cstate starts as a copy of the pstate's base state; core
 * domains then get the cstep frequency, boost-adjusted per pstate.
 * Entries needing more voltage than the board supports are skipped.
 * NOTE(review): embedded numbering gaps show missing lines (error
 * returns, braces) in this excerpt.
 */
226 nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
228 struct nvkm_bios *bios = clk->subdev.device->bios;
229 struct nvkm_volt *volt = clk->subdev.device->volt;
230 const struct nvkm_domain *domain = clk->domains;
231 struct nvkm_cstate *cstate = NULL;
232 struct nvbios_cstepX cstepX;
/* Parse cstep-X entry @idx from the VBIOS. */
236 data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
/* Reject entries whose minimum voltage exceeds what the board can do. */
240 if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
243 cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
/* Inherit the base state, then override voltage and core clocks. */
247 *cstate = pstate->base;
248 cstate->voltage = cstepX.voltage;
/* Apply the cstep frequency (boost-table adjusted) to core domains. */
251 while (domain && domain->name != nv_clk_src_max) {
252 if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
253 u32 freq = nvkm_clk_adjust(clk, true, pstate->pstate,
254 domain->bios, cstepX.freq);
255 cstate->domain[domain->name] = freq;
260 list_add(&cstate->head, &pstate->list);
264 /******************************************************************************
266 *****************************************************************************/
/*
 * nvkm_pstate_prog() - switch the GPU to performance state index
 * @pstatei: reconfigure the PCIe link, reclock VRAM if the ram backend
 * supports it, then program the highest valid cstate.
 * NOTE(review): embedded numbering gaps show missing lines (loop body,
 * error handling) in this excerpt.
 */
268 nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
270 struct nvkm_subdev *subdev = &clk->subdev;
271 struct nvkm_fb *fb = subdev->device->fb;
272 struct nvkm_pci *pci = subdev->device->pci;
273 struct nvkm_pstate *pstate;
/* Find the pstatei'th entry in the states list. */
276 list_for_each_entry(pstate, &clk->states, head) {
277 if (idx++ == pstatei)
281 nvkm_debug(subdev, "setting performance state %d\n", pstatei);
282 clk->pstate = pstatei;
/* Retrain the PCIe link to the speed/width this pstate wants. */
284 nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);
/* Reclock VRAM when the ram implementation provides calc/prog/tidy. */
286 if (fb && fb->ram && fb->ram->func->calc) {
287 struct nvkm_ram *ram = fb->ram;
288 int khz = pstate->base.domain[nv_clk_src_mem];
290 ret = ram->func->calc(ram, khz);
292 ret = ram->func->prog(ram);
294 ram->func->tidy(ram);
/* Finally program engine clocks at the best available cstate. */
297 return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST);
/*
 * nvkm_pstate_work() - deferred worker that resolves the target pstate
 * from user (AC/DC), automatic and thermal-deceleration inputs, and
 * reprograms the hardware when it differs from the current one.
 * NOTE(review): embedded numbering gaps show missing lines (braces,
 * early return) in this excerpt.
 */
301 nvkm_pstate_work(struct work_struct *work)
303 struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
304 struct nvkm_subdev *subdev = &clk->subdev;
/* Consume the "work pending" flag; bail if nobody requested a recalc. */
307 if (!atomic_xchg(&clk->waiting, 0))
309 clk->pwrsrc = power_supply_is_system_supplied();
311 nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n",
312 clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
313 clk->astate, clk->temp, clk->dstate);
/* Pick the user state for the current power source; negative means
 * "no explicit request", falling back to the automatic state, clamped
 * by the number of states and floored by the thermal dstate. */
315 pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
316 if (clk->state_nr && pstate != -1) {
317 pstate = (pstate < 0) ? clk->astate : pstate;
318 pstate = min(pstate, clk->state_nr - 1);
319 pstate = max(pstate, clk->dstate);
321 pstate = clk->pstate = -1;
324 nvkm_trace(subdev, "-> %d\n", pstate);
/* Only touch the hardware when the target actually changed. */
325 if (pstate != clk->pstate) {
326 int ret = nvkm_pstate_prog(clk, pstate);
328 nvkm_error(subdev, "error setting pstate %d: %d\n",
/* Wake synchronous callers and re-arm the power-source notifier. */
333 wake_up_all(&clk->wait);
334 nvkm_notify_get(&clk->pwrsrc_ntfy);
/*
 * nvkm_pstate_calc() - request a pstate re-evaluation via the worker;
 * when @wait is true (conditional line missing from this excerpt),
 * block until the worker has consumed the request.
 */
338 nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
340 atomic_set(&clk->waiting, 1);
341 schedule_work(&clk->work);
343 wait_event(clk->wait, !atomic_read(&clk->waiting));
/*
 * nvkm_pstate_info() - emit a debug summary of a pstate: its id and,
 * per clock domain, the base frequency plus the min-max range covered
 * by its cstates (formatted in MHz for named domains).
 * NOTE(review): embedded numbering gaps show missing lines (hi/lo
 * tracking, braces) in this excerpt.
 */
348 nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
350 const struct nvkm_domain *clock = clk->domains - 1;
351 struct nvkm_cstate *cstate;
352 struct nvkm_subdev *subdev = &clk->subdev;
353 char info[3][32] = { "", "", "" };
/* 0xff is the "boot/base" pseudo-pstate and keeps its default name. */
357 if (pstate->pstate != 0xff)
358 snprintf(name, sizeof(name), "%02x", pstate->pstate);
/* Pre-increment skips the -1 offset applied to clk->domains above. */
360 while ((++clock)->name != nv_clk_src_max) {
361 u32 lo = pstate->base.domain[clock->name];
366 nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
/* Each cstate contributes its frequency for this domain. */
367 list_for_each_entry(cstate, &pstate->list, head) {
368 u32 freq = cstate->domain[clock->name];
371 nvkm_debug(subdev, "%10d KHz\n", freq);
/* Collect up to three human-readable "name lo-hi MHz" strings. */
374 if (clock->mname && ++i < ARRAY_SIZE(info)) {
378 snprintf(info[i], sizeof(info[i]), "%s %d MHz",
381 snprintf(info[i], sizeof(info[i]),
382 "%s %d-%d MHz", clock->mname, lo, hi);
387 nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
/*
 * nvkm_pstate_del() - free a pstate: delete each of its cstates (safe
 * iteration, since entries are removed while walking), then unlink the
 * pstate itself (the kfree() line is missing from this excerpt).
 */
391 nvkm_pstate_del(struct nvkm_pstate *pstate)
393 struct nvkm_cstate *cstate, *temp;
395 list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
396 nvkm_cstate_del(cstate);
399 list_del(&pstate->head);
/*
 * nvkm_pstate_new() - build a pstate from VBIOS performance table
 * entry @idx: parse the perf-E entry into the pstate's base cstate,
 * refine per-domain clocks from perf-S subentries (v0x40+), then
 * attach cstep-derived cstates and add the pstate to clk->states.
 * NOTE(review): embedded numbering gaps show missing lines (error
 * returns, allocation check, loop bodies) in this excerpt.
 */
404 nvkm_pstate_new(struct nvkm_clk *clk, int idx)
406 struct nvkm_bios *bios = clk->subdev.device->bios;
407 const struct nvkm_domain *domain = clk->domains - 1;
408 struct nvkm_pstate *pstate;
409 struct nvkm_cstate *cstate;
410 struct nvbios_cstepE cstepE;
411 struct nvbios_perfE perfE;
412 u8 ver, hdr, cnt, len;
/* Parse performance table entry @idx. */
415 data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
/* 0xff marks an invalid/unused entry. */
418 if (perfE.pstate == 0xff)
421 pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
422 cstate = &pstate->base;
426 INIT_LIST_HEAD(&pstate->list);
/* Populate the pstate and its base cstate from the perf-E entry. */
428 pstate->pstate = perfE.pstate;
429 pstate->fanspeed = perfE.fanspeed;
430 pstate->pcie_speed = perfE.pcie_speed;
431 pstate->pcie_width = perfE.pcie_width;
432 cstate->voltage = perfE.voltage;
433 cstate->domain[nv_clk_src_core] = perfE.core;
434 cstate->domain[nv_clk_src_shader] = perfE.shader;
435 cstate->domain[nv_clk_src_mem] = perfE.memory;
436 cstate->domain[nv_clk_src_vdec] = perfE.vdec;
437 cstate->domain[nv_clk_src_dom6] = perfE.disp;
/* Table version 0x40+: per-domain perf-S subentries override the
 * coarse values above (pre-increment skips the -1 offset on domains). */
439 while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
440 struct nvbios_perfS perfS;
441 u8 sver = ver, shdr = hdr;
442 u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
443 &sver, &shdr, cnt, len, &perfS);
444 if (perfSe == 0 || sver != 0x40)
/* Core domains get boost-table clamping applied (adjust=false). */
447 if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
448 perfS.v40.freq = nvkm_clk_adjust(clk, false,
454 cstate->domain[domain->name] = perfS.v40.freq;
/* Attach additional cstates from the cstep table for this pstate. */
457 data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
459 int idx = cstepE.index;
461 nvkm_cstate_new(clk, idx, pstate);
465 nvkm_pstate_info(clk, pstate);
466 list_add_tail(&pstate->head, &clk->states);
471 /******************************************************************************
472 * Adjustment triggers
473 *****************************************************************************/
/*
 * nvkm_clk_ustate_update() - validate a user pstate request @req.
 * -1/-2 are special sentinels (presumably "disabled"/"auto" — the
 * lines giving their return values are missing from this excerpt);
 * otherwise the request must match an existing pstate id. Rejected
 * outright when reclocking is not allowed.
 */
475 nvkm_clk_ustate_update(struct nvkm_clk *clk, int req)
477 struct nvkm_pstate *pstate;
/* Reclocking disabled on this board: refuse any request. */
480 if (!clk->allow_reclock)
483 if (req != -1 && req != -2) {
/* Search the known pstates for a matching id. */
484 list_for_each_entry(pstate, &clk->states, head) {
485 if (pstate->pstate == req)
490 if (pstate->pstate != req)
/*
 * nvkm_clk_nstate() - parse an "NvClkMode"-style config string of
 * length @arglen: "auto" (with reclocking allowed), "disabled", or a
 * numeric pstate id. The string is temporarily NUL-terminated in place
 * for kstrtol() and then restored.
 * NOTE(review): embedded numbering gaps show missing lines (returns,
 * braces) in this excerpt.
 */
499 nvkm_clk_nstate(struct nvkm_clk *clk, const char *mode, int arglen)
503 if (clk->allow_reclock && !strncasecmpz(mode, "auto", arglen))
506 if (strncasecmpz(mode, "disabled", arglen)) {
/* Save the byte after the argument so it can be restored below. */
507 char save = mode[arglen];
510 ((char *)mode)[arglen] = '\0';
511 if (!kstrtol(mode, 0, &v)) {
512 ret = nvkm_clk_ustate_update(clk, v);
516 ((char *)mode)[arglen] = save;
/*
 * nvkm_clk_ustate() - set the user-requested pstate for the AC or DC
 * (@pwr) power source after validation, then trigger a synchronous
 * pstate recalculation. The "ret -= 2" rebases the validated value
 * into the internal sentinel encoding used by ustate_ac/ustate_dc.
 */
523 nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr)
525 int ret = nvkm_clk_ustate_update(clk, req);
527 if (ret -= 2, pwr) clk->ustate_ac = ret;
528 else clk->ustate_dc = ret;
529 return nvkm_pstate_calc(clk, true);
/*
 * nvkm_clk_astate() - set (when @rel == 0) or nudge (by @rel) the
 * automatic pstate, clamp it to [0, state_nr - 1], and schedule a
 * recalculation (synchronous when @wait).
 */
535 nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
537 if (!rel) clk->astate = req;
538 if ( rel) clk->astate += rel;
539 clk->astate = min(clk->astate, clk->state_nr - 1);
540 clk->astate = max(clk->astate, 0);
541 return nvkm_pstate_calc(clk, wait);
/*
 * nvkm_clk_tstate() - record a new temperature reading and trigger an
 * asynchronous pstate recalculation; a no-op when unchanged (the line
 * assigning clk->temp is missing from this excerpt).
 */
545 nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp)
547 if (clk->temp == temp)
550 return nvkm_pstate_calc(clk, false);
/*
 * nvkm_clk_dstate() - set or adjust the thermal-deceleration floor
 * pstate (same set/nudge/clamp pattern as nvkm_clk_astate), then
 * recalculate synchronously.
 */
554 nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
556 if (!rel) clk->dstate = req;
557 if ( rel) clk->dstate += rel;
558 clk->dstate = min(clk->dstate, clk->state_nr - 1);
559 clk->dstate = max(clk->dstate, 0);
560 return nvkm_pstate_calc(clk, true);
/*
 * nvkm_clk_pwrsrc() - power-source-change notifier callback: kick an
 * asynchronous pstate recalculation (the worker re-reads the supply
 * state) and drop the notification.
 */
564 nvkm_clk_pwrsrc(struct nvkm_notify *notify)
566 struct nvkm_clk *clk =
567 container_of(notify, typeof(*clk), pwrsrc_ntfy);
568 nvkm_pstate_calc(clk, false);
569 return NVKM_NOTIFY_DROP;
572 /******************************************************************************
573 * subdev base class implementation
574 *****************************************************************************/
/* nvkm_clk_read() - read the current frequency of clock domain @src
 * via the chip-specific implementation. */
577 nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
579 return clk->func->read(clk, src);
/*
 * nvkm_clk_fini() - subdev teardown for suspend/unload: silence the
 * power-source notifier, wait for any in-flight worker, then call the
 * chip-specific fini hook (presumably guarded by a NULL check on the
 * missing line — TODO confirm).
 */
583 nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
585 struct nvkm_clk *clk = nvkm_clk(subdev);
586 nvkm_notify_put(&clk->pwrsrc_ntfy);
587 flush_work(&clk->work);
589 clk->func->fini(clk);
/*
 * nvkm_clk_init() - subdev init: snapshot the boot clocks of every
 * domain into the 0xff "boot" pstate, defer to a chip-specific init
 * hook when present, then seed the automatic state / default
 * temperature and trigger the first reclock.
 * NOTE(review): embedded numbering gaps show missing lines (loop
 * braces, returns) in this excerpt.
 */
594 nvkm_clk_init(struct nvkm_subdev *subdev)
596 struct nvkm_clk *clk = nvkm_clk(subdev);
597 const struct nvkm_domain *clock = clk->domains;
/* bstate records the clocks the board booted with; id 0xff marks it
 * as the pseudo "boot" pstate. */
600 memset(&clk->bstate, 0x00, sizeof(clk->bstate));
601 INIT_LIST_HEAD(&clk->bstate.list);
602 clk->bstate.pstate = 0xff;
/* Read back each domain's current frequency into the boot state. */
604 while (clock->name != nv_clk_src_max) {
605 ret = nvkm_clk_read(clk, clock->name);
607 nvkm_error(subdev, "%02x freq unknown\n", clock->name);
610 clk->bstate.base.domain[clock->name] = ret;
614 nvkm_pstate_info(clk, &clk->bstate);
/* Chip-specific implementations may take over init entirely. */
617 return clk->func->init(clk);
/* Default to the highest automatic state and a safe temperature. */
619 clk->astate = clk->state_nr - 1;
622 clk->temp = 90; /* reasonable default value */
623 nvkm_pstate_calc(clk, true);
/*
 * nvkm_clk_dtor() - subdev destructor: tear down the power-source
 * notifier and free BIOS-built pstates. Statically-provided pstate
 * tables (func->pstates) are not freed — they were never allocated.
 */
628 nvkm_clk_dtor(struct nvkm_subdev *subdev)
630 struct nvkm_clk *clk = nvkm_clk(subdev);
631 struct nvkm_pstate *pstate, *temp;
633 nvkm_notify_fini(&clk->pwrsrc_ntfy);
635 /* Early return if the pstates have been provided statically */
636 if (clk->func->pstates)
/* Safe iteration: pstates are unlinked/freed while walking. */
639 list_for_each_entry_safe(pstate, temp, &clk->states, head) {
640 nvkm_pstate_del(pstate);
/* Subdev ops vtable binding the clk base class into the nvkm subdev
 * lifecycle (destroy / init / fini). */
646 static const struct nvkm_subdev_func
648 .dtor = nvkm_clk_dtor,
649 .init = nvkm_clk_init,
650 .fini = nvkm_clk_fini,
/*
 * nvkm_clk_ctor() - common constructor for clk implementations:
 * reads base/boost clocks from the VBIOS vpstate table, builds the
 * pstate list (from the perf table, or a static func->pstates array),
 * registers a power-source-change notifier, and applies the
 * NvClkMode/NvClkModeAC/NvClkModeDC/NvBoost config options.
 * NOTE(review): embedded numbering gaps show missing lines (error
 * handling, braces, final return) in this excerpt.
 */
654 nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
655 int index, bool allow_reclock, struct nvkm_clk *clk)
657 struct nvkm_subdev *subdev = &clk->subdev;
658 struct nvkm_bios *bios = device->bios;
659 int ret, idx, arglen;
661 struct nvbios_vpstate_header h;
663 nvkm_subdev_ctor(&nvkm_clk, device, index, subdev);
/* Derive base/boost clock caps (kHz) from the VBIOS vpstate table;
 * these feed the NvBoost limit checks in nvkm_cstate_valid(). */
665 if (bios && !nvbios_vpstate_parse(bios, &h)) {
666 struct nvbios_vpstate_entry base, boost;
667 if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
668 clk->boost_khz = boost.clock_mhz * 1000;
669 if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
670 clk->base_khz = base.clock_mhz * 1000;
674 INIT_LIST_HEAD(&clk->states);
675 clk->domains = func->domains;
678 clk->allow_reclock = allow_reclock;
/* Worker + waitqueue used by nvkm_pstate_calc()/nvkm_pstate_work(). */
680 INIT_WORK(&clk->work, nvkm_pstate_work);
681 init_waitqueue_head(&clk->wait);
682 atomic_set(&clk->waiting, 0);
684 /* If no pstates are provided, try and fetch them from the BIOS */
685 if (!func->pstates) {
688 ret = nvkm_pstate_new(clk, idx++);
/* Otherwise link the implementation's static pstate array. */
691 for (idx = 0; idx < func->nr_pstates; idx++)
692 list_add_tail(&func->pstates[idx].head, &clk->states);
693 clk->state_nr = func->nr_pstates;
/* React to AC/DC power-source changes by recalculating the pstate. */
696 ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
697 NULL, 0, 0, &clk->pwrsrc_ntfy);
/* NvClkMode sets both AC and DC user states ... */
701 mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
703 clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
704 clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
/* ... which NvClkModeAC / NvClkModeDC may override individually. */
707 mode = nvkm_stropt(device->cfgopt, "NvClkModeAC", &arglen);
709 clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
711 mode = nvkm_stropt(device->cfgopt, "NvClkModeDC", &arglen);
713 clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
715 clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost",
716 NVKM_CLK_BOOST_NONE);
/*
 * nvkm_clk_new_() - allocate a bare nvkm_clk (returning -ENOMEM on the
 * missing line when allocation fails) and construct it via
 * nvkm_clk_ctor(). Used by implementations without extra state.
 */
721 nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
722 int index, bool allow_reclock, struct nvkm_clk **pclk)
724 if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
726 return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);