/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "gf100.h"
#include "ram.h"

#include <core/memory.h>
#include <core/option.h>

extern const u8 gf100_pte_storage_type_map[256];
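
/* The memtype is carried in bits 15:8 of tile_flags; 0xff in the PTE
 * storage-type map marks it as unsupported.
 */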
bool
gf100_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
{
	u8 memtype = (tile_flags & 0x0000ff00) >> 8;
	return likely((gf100_pte_storage_type_map[memtype] != 0xff));
}
void
gf100_fb_intr(struct nvkm_fb *base)
{
	struct gf100_fb *fb = gf100_fb(base);
	struct nvkm_subdev *subdev = &fb->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x000100);
	if (intr & 0x08000000)
		nvkm_debug(subdev, "PFFB intr\n");
	if (intr & 0x00002000)
		nvkm_debug(subdev, "PBFB intr\n");
}
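
/* One-time setup: allocate the MMU debug read/write buffers (capped at
 * 0x1000 bytes, tunable through the "MmuDebugBufferSize" option) and a
 * zeroed scratch page whose DMA address gf100_fb_init() writes to 0x100c10.
 */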
int
gf100_fb_oneinit(struct nvkm_fb *base)
{
	struct gf100_fb *fb = gf100_fb(base);
	struct nvkm_device *device = fb->base.subdev.device;
	int ret, size = 0x1000;

	size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
	size = min(size, 0x1000);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
			      true, &fb->base.mmu_rd);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
			      true, &fb->base.mmu_wr);
	if (ret)
		return ret;

	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (fb->r100c10_page) {
		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
					   PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(device->dev, fb->r100c10))
			return -EFAULT;
	}

	return 0;
}
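
/* Apply the configured big-page setting: a page shift of 16 sets bit 0 of
 * 0x100c80; anything else clears it and 17 is recorded as the shift in use.
 */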
void
gf100_fb_init_page(struct nvkm_fb *fb)
{
	struct nvkm_device *device = fb->subdev.device;
	switch (fb->page) {
	case 16:
		nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001);
		break;
	case 17:
	default:
		nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000);
		fb->page = 17;
		break;
	}
}
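
/* Point the chip at the scratch page mapped during oneinit; the register
 * takes the DMA address shifted right by 8, i.e. in units of 256 bytes.
 */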
void
gf100_fb_init(struct nvkm_fb *base)
{
	struct gf100_fb *fb = gf100_fb(base);
	struct nvkm_device *device = fb->base.subdev.device;

	if (fb->r100c10_page)
		nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
}
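
/* Undo oneinit: unmap and release the scratch page.  The object itself is
 * returned so the core can free it.
 */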
void *
gf100_fb_dtor(struct nvkm_fb *base)
{
	struct gf100_fb *fb = gf100_fb(base);
	struct nvkm_device *device = fb->base.subdev.device;

	if (fb->r100c10_page) {
		dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(fb->r100c10_page);
	}

	return fb;
}
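
/* Common constructor shared by the GF100-family variants: allocate the
 * wrapper object and initialise the nvkm_fb base with the given func table.
 */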
int
gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
	      int index, struct nvkm_fb **pfb)
{
	struct gf100_fb *fb;

	if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_fb_ctor(func, device, index, &fb->base);
	*pfb = &fb->base;
	return 0;
}
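
/* GF100 function table, plugged into the nvkm_fb base class. */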
static const struct nvkm_fb_func
gf100_fb = {
	.dtor = gf100_fb_dtor,
	.oneinit = gf100_fb_oneinit,
	.init = gf100_fb_init,
	.init_page = gf100_fb_init_page,
	.intr = gf100_fb_intr,
	.ram_new = gf100_ram_new,
	.memtype_valid = gf100_fb_memtype_valid,
};
int
gf100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
	return gf100_fb_new_(&gf100_fb, device, index, pfb);
}