/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
28 #include <core/gpuobj.h>
29 #include <subdev/timer.h>
32 gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
34 struct nvkm_device *device = disp->engine.subdev.device;
35 *pmask = nvkm_rd32(device, 0x610064);
36 return (nvkm_rd32(device, 0x610074) & 0x03f00000) >> 20;
40 gv100_disp_super(struct work_struct *work)
42 struct nv50_disp *disp =
43 container_of(work, struct nv50_disp, supervisor);
44 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
45 struct nvkm_device *device = subdev->device;
46 struct nvkm_head *head;
47 u32 stat = nvkm_rd32(device, 0x6107a8);
50 nvkm_debug(subdev, "supervisor %d: %08x\n", ffs(disp->super), stat);
51 list_for_each_entry(head, &disp->base.head, head) {
52 mask[head->id] = nvkm_rd32(device, 0x6107ac + (head->id * 4));
53 HEAD_DBG(head, "%08x", mask[head->id]);
56 if (disp->super & 0x00000001) {
57 nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
58 nv50_disp_super_1(disp);
59 list_for_each_entry(head, &disp->base.head, head) {
60 if (!(mask[head->id] & 0x00001000))
62 nv50_disp_super_1_0(disp, head);
65 if (disp->super & 0x00000002) {
66 list_for_each_entry(head, &disp->base.head, head) {
67 if (!(mask[head->id] & 0x00001000))
69 nv50_disp_super_2_0(disp, head);
71 nvkm_outp_route(&disp->base);
72 list_for_each_entry(head, &disp->base.head, head) {
73 if (!(mask[head->id] & 0x00010000))
75 nv50_disp_super_2_1(disp, head);
77 list_for_each_entry(head, &disp->base.head, head) {
78 if (!(mask[head->id] & 0x00001000))
80 nv50_disp_super_2_2(disp, head);
83 if (disp->super & 0x00000004) {
84 list_for_each_entry(head, &disp->base.head, head) {
85 if (!(mask[head->id] & 0x00001000))
87 nv50_disp_super_3_0(disp, head);
91 list_for_each_entry(head, &disp->base.head, head)
92 nvkm_wr32(device, 0x6107ac + (head->id * 4), 0x00000000);
93 nvkm_wr32(device, 0x6107a8, 0x80000000);
97 gv100_disp_exception(struct nv50_disp *disp, int chid)
99 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
100 struct nvkm_device *device = subdev->device;
101 u32 stat = nvkm_rd32(device, 0x611020 + (chid * 12));
102 u32 type = (stat & 0x00007000) >> 12;
103 u32 mthd = (stat & 0x00000fff) << 2;
104 u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
105 u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
107 nvkm_error(subdev, "chid %d %08x [type %d mthd %04x] "
108 "data %08x code %08x\n",
109 chid, stat, type, mthd, data, code);
111 if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
114 nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
121 nvkm_wr32(device, 0x611020 + (chid * 12), 0x90000000);
125 gv100_disp_intr_ctrl_disp(struct nv50_disp *disp)
127 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
128 struct nvkm_device *device = subdev->device;
129 u32 stat = nvkm_rd32(device, 0x611c30);
131 if (stat & 0x00000007) {
132 disp->super = (stat & 0x00000007);
133 queue_work(disp->wq, &disp->supervisor);
134 nvkm_wr32(device, 0x611860, disp->super);
138 /*TODO: I would guess this is VBIOS_RELEASE, however, NFI how to
139 * ACK it, nor does RM appear to bother.
141 if (stat & 0x00000008)
144 if (stat & 0x00000100) {
145 unsigned long wndws = nvkm_rd32(device, 0x611858);
146 unsigned long other = nvkm_rd32(device, 0x61185c);
149 nvkm_wr32(device, 0x611858, wndws);
150 nvkm_wr32(device, 0x61185c, other);
152 /* AWAKEN_OTHER_CORE. */
153 if (other & 0x00000001)
154 nv50_disp_chan_uevent_send(disp, 0);
156 /* AWAKEN_WIN_CH(n). */
157 for_each_set_bit(wndw, &wndws, disp->wndw.nr) {
158 nv50_disp_chan_uevent_send(disp, 1 + wndw);
163 nvkm_warn(subdev, "ctrl %08x\n", stat);
167 gv100_disp_intr_exc_other(struct nv50_disp *disp)
169 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
170 struct nvkm_device *device = subdev->device;
171 u32 stat = nvkm_rd32(device, 0x611854);
175 if (stat & 0x00000001) {
176 nvkm_wr32(device, 0x611854, 0x00000001);
177 gv100_disp_exception(disp, 0);
181 if ((mask = (stat & 0x00ff0000) >> 16)) {
182 for_each_set_bit(head, &mask, disp->wndw.nr) {
183 nvkm_wr32(device, 0x611854, 0x00010000 << head);
184 gv100_disp_exception(disp, 73 + head);
185 stat &= ~(0x00010000 << head);
190 nvkm_warn(subdev, "exception %08x\n", stat);
191 nvkm_wr32(device, 0x611854, stat);
196 gv100_disp_intr_exc_winim(struct nv50_disp *disp)
198 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
199 struct nvkm_device *device = subdev->device;
200 unsigned long stat = nvkm_rd32(device, 0x611850);
203 for_each_set_bit(wndw, &stat, disp->wndw.nr) {
204 nvkm_wr32(device, 0x611850, BIT(wndw));
205 gv100_disp_exception(disp, 33 + wndw);
210 nvkm_warn(subdev, "wimm %08x\n", (u32)stat);
211 nvkm_wr32(device, 0x611850, stat);
216 gv100_disp_intr_exc_win(struct nv50_disp *disp)
218 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
219 struct nvkm_device *device = subdev->device;
220 unsigned long stat = nvkm_rd32(device, 0x61184c);
223 for_each_set_bit(wndw, &stat, disp->wndw.nr) {
224 nvkm_wr32(device, 0x61184c, BIT(wndw));
225 gv100_disp_exception(disp, 1 + wndw);
230 nvkm_warn(subdev, "wndw %08x\n", (u32)stat);
231 nvkm_wr32(device, 0x61184c, stat);
236 gv100_disp_intr_head_timing(struct nv50_disp *disp, int head)
238 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
239 struct nvkm_device *device = subdev->device;
240 u32 stat = nvkm_rd32(device, 0x611800 + (head * 0x04));
242 /* LAST_DATA, LOADV. */
243 if (stat & 0x00000003) {
244 nvkm_wr32(device, 0x611800 + (head * 0x04), stat & 0x00000003);
248 if (stat & 0x00000004) {
249 nvkm_disp_vblank(&disp->base, head);
250 nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000004);
255 nvkm_warn(subdev, "head %08x\n", stat);
256 nvkm_wr32(device, 0x611800 + (head * 0x04), stat);
261 gv100_disp_intr(struct nv50_disp *disp)
263 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
264 struct nvkm_device *device = subdev->device;
265 u32 stat = nvkm_rd32(device, 0x611ec0);
269 if ((mask = (stat & 0x000000ff))) {
270 for_each_set_bit(head, &mask, 8) {
271 gv100_disp_intr_head_timing(disp, head);
276 if (stat & 0x00000200) {
277 gv100_disp_intr_exc_win(disp);
281 if (stat & 0x00000400) {
282 gv100_disp_intr_exc_winim(disp);
286 if (stat & 0x00000800) {
287 gv100_disp_intr_exc_other(disp);
291 if (stat & 0x00001000) {
292 gv100_disp_intr_ctrl_disp(disp);
297 nvkm_warn(subdev, "intr %08x\n", stat);
301 gv100_disp_fini(struct nv50_disp *disp)
303 struct nvkm_device *device = disp->base.engine.subdev.device;
304 nvkm_wr32(device, 0x611db0, 0x00000000);
308 gv100_disp_init(struct nv50_disp *disp)
310 struct nvkm_device *device = disp->base.engine.subdev.device;
311 struct nvkm_head *head;
315 /* Claim ownership of display. */
316 if (nvkm_rd32(device, 0x6254e8) & 0x00000002) {
317 nvkm_mask(device, 0x6254e8, 0x00000001, 0x00000000);
318 if (nvkm_msec(device, 2000,
319 if (!(nvkm_rd32(device, 0x6254e8) & 0x00000002))
325 /* Lock pin capabilities. */
326 tmp = nvkm_rd32(device, 0x610068);
327 nvkm_wr32(device, 0x640008, tmp);
329 /* SOR capabilities. */
330 for (i = 0; i < disp->sor.nr; i++) {
331 tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
332 nvkm_mask(device, 0x640000, 0x00000100 << i, 0x00000100 << i);
333 nvkm_wr32(device, 0x640144 + (i * 0x08), tmp);
336 /* Head capabilities. */
337 list_for_each_entry(head, &disp->base.head, head) {
338 const int id = head->id;
341 tmp = nvkm_rd32(device, 0x616300 + (id * 0x800));
342 nvkm_wr32(device, 0x640048 + (id * 0x020), tmp);
345 for (j = 0; j < 6 * 4; j += 4) {
346 tmp = nvkm_rd32(device, 0x616100 + (id * 0x800) + j);
347 nvkm_wr32(device, 0x640030 + (id * 0x20) + j, tmp);
351 /* Window capabilities. */
352 for (i = 0; i < disp->wndw.nr; i++) {
353 nvkm_mask(device, 0x640004, 1 << i, 1 << i);
354 for (j = 0; j < 6 * 4; j += 4) {
355 tmp = nvkm_rd32(device, 0x630050 + (i * 0x800) + j);
356 nvkm_wr32(device, 0x6401e4 + (i * 0x20) + j, tmp);
360 /* IHUB capabilities. */
361 for (i = 0; i < 4; i++) {
362 tmp = nvkm_rd32(device, 0x62e000 + (i * 0x04));
363 nvkm_wr32(device, 0x640010 + (i * 0x04), tmp);
366 nvkm_mask(device, 0x610078, 0x00000001, 0x00000001);
368 /* Setup instance memory. */
369 switch (nvkm_memory_target(disp->inst->memory)) {
370 case NVKM_MEM_TARGET_VRAM: tmp = 0x00000001; break;
371 case NVKM_MEM_TARGET_NCOH: tmp = 0x00000002; break;
372 case NVKM_MEM_TARGET_HOST: tmp = 0x00000003; break;
376 nvkm_wr32(device, 0x610010, 0x00000008 | tmp);
377 nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);
379 /* CTRL_DISP: AWAKEN, ERROR, SUPERVISOR[1-3]. */
380 nvkm_wr32(device, 0x611cf0, 0x00000187); /* MSK. */
381 nvkm_wr32(device, 0x611db0, 0x00000187); /* EN. */
383 /* EXC_OTHER: CURSn, CORE. */
384 nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
385 0x00000001); /* MSK. */
386 nvkm_wr32(device, 0x611dac, 0x00000000); /* EN. */
389 nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
390 nvkm_wr32(device, 0x611da8, 0x00000000); /* EN. */
393 nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
394 nvkm_wr32(device, 0x611da4, 0x00000000); /* EN. */
396 /* HEAD_TIMING(n): VBLANK. */
397 list_for_each_entry(head, &disp->base.head, head) {
398 const u32 hoff = head->id * 4;
399 nvkm_wr32(device, 0x611cc0 + hoff, 0x00000004); /* MSK. */
400 nvkm_wr32(device, 0x611d80 + hoff, 0x00000000); /* EN. */
404 nvkm_wr32(device, 0x611cf4, 0x00000000); /* MSK. */
405 nvkm_wr32(device, 0x611db4, 0x00000000); /* EN. */
409 static const struct nv50_disp_func
411 .init = gv100_disp_init,
412 .fini = gv100_disp_fini,
413 .intr = gv100_disp_intr,
414 .uevent = &gv100_disp_chan_uevent,
415 .super = gv100_disp_super,
416 .root = &gv100_disp_root_oclass,
417 .wndw = { .cnt = gv100_disp_wndw_cnt },
418 .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
419 .sor = { .cnt = gv100_sor_cnt, .new = gv100_sor_new },
420 .ramht_size = 0x2000,
424 gv100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
426 return nv50_disp_new_(&gv100_disp, device, index, pdisp);