GNU Linux-libre 4.19.268-gnu1
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gf100.h"
#include "ram.h"

#include <core/memory.h>
#include <core/option.h>
#include <subdev/therm.h>
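
/*
 * FB interrupt handler: reads the top-level interrupt status register
 * (0x000100) and logs pending PFFB/PBFB interrupts; no further handling
 * is done here.
 */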
void
gf100_fb_intr(struct nvkm_fb *base)
{
        struct gf100_fb *fb = gf100_fb(base);
        struct nvkm_subdev *subdev = &fb->base.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x000100);
        if (intr & 0x08000000)
                nvkm_debug(subdev, "PFFB intr\n");
        if (intr & 0x00002000)
                nvkm_debug(subdev, "PBFB intr\n");
}
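
/*
 * One-time setup: allocates the MMU debug read/write buffers (sized from
 * the FB's big page size, overridable with the "MmuDebugBufferSize" config
 * option, with a 4KiB minimum) and a zeroed scratch page whose DMA address
 * gf100_fb_init() later programs into register 0x100c10.
 */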
int
gf100_fb_oneinit(struct nvkm_fb *base)
{
        struct gf100_fb *fb = gf100_fb(base);
        struct nvkm_device *device = fb->base.subdev.device;
        int ret, size = 1 << (fb->base.page ? fb->base.page : 17);

        size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
        size = max(size, 0x1000);

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
                              true, &fb->base.mmu_rd);
        if (ret)
                return ret;

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
                              true, &fb->base.mmu_wr);
        if (ret)
                return ret;

        fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (fb->r100c10_page) {
                fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
                                           PAGE_SIZE, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(device->dev, fb->r100c10))
                        return -EFAULT;
        }

        return 0;
}
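
/*
 * Selects the FB large page size: bit 0 of 0x100c80 is set for a page
 * shift of 16 (64KiB) and cleared for 17 (128KiB); any other shift is
 * rejected.
 */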
int
gf100_fb_init_page(struct nvkm_fb *fb)
{
        struct nvkm_device *device = fb->subdev.device;
        switch (fb->page) {
        case 16: nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001); break;
        case 17: nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); break;
        default:
                return -EINVAL;
        }
        return 0;
}
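
/*
 * Hardware init: points 0x100c10 at the scratch page (the register takes
 * the DMA address in 256-byte units, hence the >> 8) and, if this
 * implementation provides a clock gating pack, hands it to the therm
 * subdev.
 */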
void
gf100_fb_init(struct nvkm_fb *base)
{
        struct gf100_fb *fb = gf100_fb(base);
        struct nvkm_device *device = fb->base.subdev.device;

        if (fb->r100c10_page)
                nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);

        if (base->func->clkgate_pack) {
                nvkm_therm_clkgate_init(device->therm,
                                        base->func->clkgate_pack);
        }
}
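
/*
 * Teardown: unmaps and frees the 0x100c10 scratch page allocated in
 * gf100_fb_oneinit(), then returns the containing gf100_fb for the caller
 * to free.
 */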
void *
gf100_fb_dtor(struct nvkm_fb *base)
{
        struct gf100_fb *fb = gf100_fb(base);
        struct nvkm_device *device = fb->base.subdev.device;

        if (fb->r100c10_page) {
                dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
                __free_page(fb->r100c10_page);
        }

        return fb;
}
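
/*
 * Common constructor shared by GF100-family FB implementations: allocates
 * the gf100_fb wrapper and initialises the embedded nvkm_fb base object.
 */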
int
gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
              int index, struct nvkm_fb **pfb)
{
        struct gf100_fb *fb;

        if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_fb_ctor(func, device, index, &fb->base);
        *pfb = &fb->base;

        return 0;
}
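
/*
 * GF100 implementation of the nvkm_fb hooks; the default big page shift
 * of 17 selects 128KiB large pages.
 */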
static const struct nvkm_fb_func
gf100_fb = {
        .dtor = gf100_fb_dtor,
        .oneinit = gf100_fb_oneinit,
        .init = gf100_fb_init,
        .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
        .ram_new = gf100_ram_new,
        .default_bigpage = 17,
};

int
gf100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
        return gf100_fb_new_(&gf100_fb, device, index, pfb);
}