2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
#include "dmacnv50.h"
#include "rootnv50.h"

#include <core/client.h>
#include <core/oproxy.h>
#include <core/ramht.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

#include <engine/dma.h>
34 struct nv50_disp_dmac_object {
35 struct nvkm_oproxy oproxy;
36 struct nv50_disp_root *root;
41 nv50_disp_dmac_child_del_(struct nvkm_oproxy *base)
43 struct nv50_disp_dmac_object *object =
44 container_of(base, typeof(*object), oproxy);
45 nvkm_ramht_remove(object->root->ramht, object->hash);
48 static const struct nvkm_oproxy_func
49 nv50_disp_dmac_child_func_ = {
50 .dtor[0] = nv50_disp_dmac_child_del_,
54 nv50_disp_dmac_child_new_(struct nv50_disp_chan *base,
55 const struct nvkm_oclass *oclass,
56 void *data, u32 size, struct nvkm_object **pobject)
58 struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
59 struct nv50_disp_root *root = chan->base.root;
60 struct nvkm_device *device = root->disp->base.engine.subdev.device;
61 const struct nvkm_device_oclass *sclass = oclass->priv;
62 struct nv50_disp_dmac_object *object;
65 if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
67 nvkm_oproxy_ctor(&nv50_disp_dmac_child_func_, oclass, &object->oproxy);
69 *pobject = &object->oproxy.base;
71 ret = sclass->ctor(device, oclass, data, size, &object->oproxy.object);
75 object->hash = chan->func->bind(chan, object->oproxy.object,
84 nv50_disp_dmac_child_get_(struct nv50_disp_chan *base, int index,
85 struct nvkm_oclass *sclass)
87 struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
88 struct nv50_disp *disp = chan->base.root->disp;
89 struct nvkm_device *device = disp->base.engine.subdev.device;
90 const struct nvkm_device_oclass *oclass = NULL;
92 sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
93 if (sclass->engine && sclass->engine->func->base.sclass) {
94 sclass->engine->func->base.sclass(sclass, index, &oclass);
96 sclass->priv = oclass;
105 nv50_disp_dmac_fini_(struct nv50_disp_chan *base)
107 struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
108 chan->func->fini(chan);
112 nv50_disp_dmac_init_(struct nv50_disp_chan *base)
114 struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
115 return chan->func->init(chan);
/* Destructor: return the containing nv50_disp_dmac so the core frees
 * the full allocation made in nv50_disp_dmac_new_().
 */
static void *
nv50_disp_dmac_dtor_(struct nv50_disp_chan *base)
{
	return nv50_disp_dmac(base);
}
124 static const struct nv50_disp_chan_func
125 nv50_disp_dmac_func_ = {
126 .dtor = nv50_disp_dmac_dtor_,
127 .init = nv50_disp_dmac_init_,
128 .fini = nv50_disp_dmac_fini_,
129 .child_get = nv50_disp_dmac_child_get_,
130 .child_new = nv50_disp_dmac_child_new_,
134 nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
135 const struct nv50_disp_chan_mthd *mthd,
136 struct nv50_disp_root *root, int chid, int head, u64 push,
137 const struct nvkm_oclass *oclass,
138 struct nvkm_object **pobject)
140 struct nvkm_device *device = root->disp->base.engine.subdev.device;
141 struct nvkm_client *client = oclass->client;
142 struct nvkm_dmaobj *dmaobj;
143 struct nv50_disp_dmac *chan;
146 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
148 *pobject = &chan->base.object;
151 ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
152 chid, chid, head, oclass, &chan->base);
156 dmaobj = nvkm_dma_search(device->dma, client, push);
160 if (dmaobj->limit - dmaobj->start != 0xfff)
163 switch (dmaobj->target) {
164 case NV_MEM_TARGET_VRAM:
165 chan->push = 0x00000001 | dmaobj->start >> 8;
167 case NV_MEM_TARGET_PCI_NOSNOOP:
168 chan->push = 0x00000003 | dmaobj->start >> 8;
178 nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
179 struct nvkm_object *object, u32 handle)
181 return nvkm_ramht_insert(chan->base.root->ramht, object,
182 chan->base.chid.user, -10, handle,
183 chan->base.chid.user << 28 |
184 chan->base.chid.user);
188 nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
190 struct nv50_disp *disp = chan->base.root->disp;
191 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
192 struct nvkm_device *device = subdev->device;
193 int ctrl = chan->base.chid.ctrl;
194 int user = chan->base.chid.user;
196 /* deactivate channel */
197 nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
198 nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
199 if (nvkm_msec(device, 2000,
200 if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
203 nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
204 nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
207 /* disable error reporting and completion notifications */
208 nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user);
212 nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
214 struct nv50_disp *disp = chan->base.root->disp;
215 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
216 struct nvkm_device *device = subdev->device;
217 int ctrl = chan->base.chid.ctrl;
218 int user = chan->base.chid.user;
220 /* enable error reporting */
221 nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user);
223 /* initialise channel for dma command submission */
224 nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
225 nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
226 nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
227 nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
228 nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
229 nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);
231 /* wait for it to go inactive */
232 if (nvkm_msec(device, 2000,
233 if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
236 nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
237 nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
244 const struct nv50_disp_dmac_func
245 nv50_disp_dmac_func = {
246 .init = nv50_disp_dmac_init,
247 .fini = nv50_disp_dmac_fini,
248 .bind = nv50_disp_dmac_bind,