/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <subdev/timer.h>

void
nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
	if (pmu && pmu->func->pgob)
		pmu->func->pgob(pmu, enable);
}

int
nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
	      u32 process, u32 message, u32 data0, u32 data1)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 addr;

	mutex_lock(&subdev->mutex);
	/* wait for a free slot in the fifo */
	addr = nvkm_rd32(device, 0x10a4a0);
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x10a4b0);
		if (tmp != (addr ^ 8))
			break;
	) < 0) {
		mutex_unlock(&subdev->mutex);
		return -EBUSY;
	}
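
	/* the ring indices count modulo 16 but address only 8 slots
	 * ((addr & 0x07) below), so the extra bit appears to work as a
	 * wrap flag: GET == PUT ^ 8 meaning the fifo is full, which is
	 * the condition the timeout loop above waits to clear.
	 */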

	/* we currently only support a single process at a time waiting
	 * on a synchronous reply, take the PMU mutex and tell the
	 * receive handler what we're waiting for
	 */
	if (reply) {
		pmu->recv.message = message;
		pmu->recv.process = process;
	}

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000001);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);
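
	/* 0x10a580 seems to act as a simple ownership token: keep writing
	 * our id until the readback confirms we hold the data segment
	 * aperture (the receive path uses token 2, and 0 releases it).
	 */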

	/* write the packet */
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
				pmu->send.base));
	nvkm_wr32(device, 0x10a1c4, process);
	nvkm_wr32(device, 0x10a1c4, message);
	nvkm_wr32(device, 0x10a1c4, data0);
	nvkm_wr32(device, 0x10a1c4, data1);
	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
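
	/* 0x10a1c0 appears to set a data-segment window address (with
	 * 0x01000000 presumably enabling auto-increment) and 0x10a1c4 to
	 * be the matching data port, so the four writes above land in the
	 * 16-byte slot addr points at before PUT is advanced.
	 */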

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wait for reply, if requested */
	if (reply) {
		wait_event(pmu->recv.wait, (pmu->recv.process == 0));
		reply[0] = pmu->recv.data[0];
		reply[1] = pmu->recv.data[1];
	}

	mutex_unlock(&subdev->mutex);
	return 0;
}

static void
nvkm_pmu_recv(struct work_struct *work)
{
	struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 process, message, data0, data1;

	/* nothing to do if GET == PUT */
	u32 addr = nvkm_rd32(device, 0x10a4cc);
	if (addr == nvkm_rd32(device, 0x10a4c8))
		return;
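
	/* 0x10a4cc/0x10a4c8 presumably hold the GET/PUT indices of the
	 * pmu->host ring, mirroring the 0x10a4a0/0x10a4b0 pair that
	 * nvkm_pmu_send uses for the host->pmu direction.
	 */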

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000002);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);

	/* read the packet */
	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
				pmu->recv.base));
	process = nvkm_rd32(device, 0x10a1c4);
	message = nvkm_rd32(device, 0x10a1c4);
	data0 = nvkm_rd32(device, 0x10a1c4);
	data1 = nvkm_rd32(device, 0x10a1c4);
	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wake process if it's waiting on a synchronous reply */
	if (pmu->recv.process) {
		if (process == pmu->recv.process &&
		    message == pmu->recv.message) {
			pmu->recv.data[0] = data0;
			pmu->recv.data[1] = data1;
			pmu->recv.process = 0;
			wake_up(&pmu->recv.wait);
			return;
		}
	}

	/* right now there's no other expected responses from the engine,
	 * so assume that any unexpected message is an error.
	 */
	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
		  (char)((process & 0x000000ff) >>  0),
		  (char)((process & 0x0000ff00) >>  8),
		  (char)((process & 0x00ff0000) >> 16),
		  (char)((process & 0xff000000) >> 24),
		  process, message, data0, data1);
}

static void
nvkm_pmu_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	struct nvkm_device *device = pmu->subdev.device;
	u32 disp = nvkm_rd32(device, 0x10a01c);
	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
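
	/* 0x10a008 is presumably the raw interrupt status; the low half of
	 * 0x10a01c appears to enable interrupts and the high half to route
	 * them away from the host, so ~(disp >> 16) drops anything not
	 * destined for us.  Handled bits are acked by writing 0x10a004.
	 */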

	if (intr & 0x00000020) {
		u32 stat = nvkm_rd32(device, 0x10a16c);
		if (stat & 0x80000000) {
			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
				   stat & 0x00ffffff,
				   nvkm_rd32(device, 0x10a168));
			nvkm_wr32(device, 0x10a16c, 0x00000000);
			intr &= ~0x00000020;
		}
	}

	if (intr & 0x00000040) {
		schedule_work(&pmu->recv.work);
		nvkm_wr32(device, 0x10a004, 0x00000040);
		intr &= ~0x00000040;
	}

	if (intr & 0x00000080) {
		nvkm_info(subdev, "wr32 %06x %08x\n",
			  nvkm_rd32(device, 0x10a7a0),
			  nvkm_rd32(device, 0x10a7a4));
		nvkm_wr32(device, 0x10a004, 0x00000080);
		intr &= ~0x00000080;
	}

	if (intr) {
		nvkm_error(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, 0x10a004, intr);
	}
}

static int
nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	struct nvkm_device *device = pmu->subdev.device;

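	/* mask the fault and message interrupts (bits 0x20 and 0x40,
	 * cf. the INTR_EN_CLR comment in nvkm_pmu_init) so the falcon
	 * presumably can't queue new receive work while we flush it.
	 */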
	nvkm_wr32(device, 0x10a014, 0x00000060);
	flush_work(&pmu->recv.work);
	return 0;
}

static int
nvkm_pmu_init(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	struct nvkm_device *device = pmu->subdev.device;
	int i;

	/* prevent previous ucode from running, wait for idle, reset */
	nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
	nvkm_msec(device, 2000,
		if (!nvkm_rd32(device, 0x10a04c))
			break;
	);
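
	/* 0x000200 is the PMC engine-enable register; toggling bit 0x2000
	 * appears to reset the PMU falcon, with the 0x000200 read flushing
	 * the posted write before we poll 0x10a10c for the post-reset
	 * scrub bits to clear.
	 */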
	nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
	nvkm_rd32(device, 0x000200);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
			break;
	);

	/* upload data segment */
	nvkm_wr32(device, 0x10a1c0, 0x01000000);
	for (i = 0; i < pmu->func->data.size / 4; i++)
		nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);

	/* upload code segment */
	nvkm_wr32(device, 0x10a180, 0x01000000);
	for (i = 0; i < pmu->func->code.size / 4; i++) {
		if ((i & 0x3f) == 0)
			nvkm_wr32(device, 0x10a188, i >> 6);
		nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
	}
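
	/* unlike the data segment, code uploads appear to be block-based:
	 * 0x10a188 selects a 256-byte block index (64 words, hence i >> 6)
	 * whenever a block boundary is crossed, and 0x10a184 streams the
	 * words into it.
	 */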

	/* start it running */
	nvkm_wr32(device, 0x10a10c, 0x00000000);
	nvkm_wr32(device, 0x10a104, 0x00000000);
	nvkm_wr32(device, 0x10a100, 0x00000002);

	/* wait for valid host->pmu ring configuration */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4d0))
			break;
	) < 0)
		return -EBUSY;
	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;

	/* wait for valid pmu->host ring configuration */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4dc))
			break;
	) < 0)
		return -EBUSY;
	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
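
	/* 0x000000e0 sets exactly the bits (0x20/0x40/0x80) handled by
	 * nvkm_pmu_intr, so 0x10a010 is presumably the INTR_EN_SET pair
	 * of the INTR_EN_CLR register at 0x10a014.
	 */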
	nvkm_wr32(device, 0x10a010, 0x000000e0);
	return 0;
}

static void *
nvkm_pmu_dtor(struct nvkm_subdev *subdev)
{
	return nvkm_pmu(subdev);
}

static const struct nvkm_subdev_func
nvkm_pmu = {
	.dtor = nvkm_pmu_dtor,
	.init = nvkm_pmu_init,
	.fini = nvkm_pmu_fini,
	.intr = nvkm_pmu_intr,
};

int
nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_pmu **ppmu)
{
	struct nvkm_pmu *pmu;
	if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
	pmu->func = func;
	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
	init_waitqueue_head(&pmu->recv.wait);
	return 0;
}