// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) Intel Corp. 2007.
 * All Rights Reserved.
 *
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this driver.
 *
 * This file is part of the Vermilion Range fb driver.
 *
 * Authors:
 *   Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *   Michel Dänzer <michel-at-tungstengraphics-dot-com>
 *   Alan Hourihane <alanh-at-tungstengraphics-dot-com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/pci.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <linux/mmzone.h>

/* #define VERMILION_DEBUG */

#include "vermilion.h"

#define MODULE_NAME "vmlfb"

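/*
 * Scale a 16-bit fbdev colour component down to a _width-bit hardware
 * value, rounding to the nearest representable value.
 */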
#define VML_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)

static struct mutex vml_mutex;
static struct list_head global_no_mode;
static struct list_head global_has_mode;
static struct fb_ops vmlfb_ops;
static struct vml_sys *subsys = NULL;
static char *vml_default_mode = "1024x768@60";
static const struct fb_videomode defaultmode = {
	NULL, 60, 1024, 768, 12896, 144, 24, 29, 3, 136, 6,
	0, FB_VMODE_NONINTERLACED
};

static u32 vml_mem_requested = (10 * 1024 * 1024);
static u32 vml_mem_contig = (4 * 1024 * 1024);
static u32 vml_mem_min = (4 * 1024 * 1024);

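/*
 * Pixel clock frequencies, in kHz, that vml_nearest_clock() can choose
 * from when snapping a requested mode to a supported clock.
 */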
static u32 vml_clocks[] = {
	6750,
	13500,
	27000,
	29700,
	37125,
	54000,
	59400,
	74250,
	120000,
	148500
};

static u32 vml_num_clocks = ARRAY_SIZE(vml_clocks);

/*
 * Allocate a contiguous vram area and make its linear kernel map
 * uncached.
 */

static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
				 unsigned min_order)
{
	gfp_t flags;
	unsigned long i;

	max_order++;
	do {
		/*
		 * Really try hard to get the needed memory.
		 * We need memory below the first 32MB, so we
		 * add the __GFP_DMA flag that guarantees that we are
		 * below the first 16MB.
		 */

		flags = __GFP_DMA | __GFP_HIGH | __GFP_KSWAPD_RECLAIM;
		va->logical =
			 __get_free_pages(flags, --max_order);
	} while (va->logical == 0 && max_order > min_order);

	if (!va->logical)
		return -ENOMEM;

	va->phys = virt_to_phys((void *)va->logical);
	va->size = PAGE_SIZE << max_order;
	va->order = max_order;

	/*
	 * It seems like __get_free_pages only ups the usage count
	 * of the first page. This doesn't work with fault mapping, so
	 * up the usage count once more (XXX: should use split_page or
	 * compound page).
	 */

	memset((void *)va->logical, 0x00, va->size);
	for (i = va->logical; i < va->logical + va->size; i += PAGE_SIZE) {
		get_page(virt_to_page(i));
	}

	/*
	 * Change caching policy of the linear kernel map to avoid
	 * mapping type conflicts with user-space mappings.
	 */
	set_pages_uc(virt_to_page(va->logical), va->size >> PAGE_SHIFT);

	printk(KERN_DEBUG MODULE_NAME
	       ": Allocated %ld bytes vram area at 0x%08lx\n",
	       va->size, va->phys);

	return 0;
}

/*
 * Free a contiguous vram area and reset its linear kernel map
 * mapping type.
 */

static void vmlfb_free_vram_area(struct vram_area *va)
{
	unsigned long j;

	if (va->logical) {

		/*
		 * Reset the linear kernel map caching policy.
		 */

		set_pages_wb(virt_to_page(va->logical),
				 va->size >> PAGE_SHIFT);

		/*
		 * Decrease the usage count on the pages we've used
		 * to compensate for upping when allocating.
		 */

		for (j = va->logical; j < va->logical + va->size;
		     j += PAGE_SIZE) {
			(void)put_page_testzero(virt_to_page(j));
		}

		printk(KERN_DEBUG MODULE_NAME
		       ": Freeing %ld bytes vram area at 0x%08lx\n",
		       va->size, va->phys);
		free_pages(va->logical, va->order);

		va->logical = 0;
	}
}

/*
 * Free allocated vram.
 */

static void vmlfb_free_vram(struct vml_info *vinfo)
{
	int i;

	for (i = 0; i < vinfo->num_areas; ++i) {
		vmlfb_free_vram_area(&vinfo->vram[i]);
	}
	vinfo->num_areas = 0;
}

/*
 * Allocate vram. Currently we try to allocate contiguous areas from the
 * __GFP_DMA zone and puzzle them together. A better approach would be to
 * allocate one contiguous area for scanout and use one-page allocations for
 * offscreen areas. This requires user-space and GPU virtual mappings.
 */

static int vmlfb_alloc_vram(struct vml_info *vinfo,
			    size_t requested,
			    size_t min_total, size_t min_contig)
{
	int i, j;
	int order;
	int contiguous;
	int err;
	struct vram_area *va;
	struct vram_area *va2;

	vinfo->num_areas = 0;
	for (i = 0; i < VML_VRAM_AREAS; ++i) {
		va = &vinfo->vram[i];
		order = 0;

		while (requested > (PAGE_SIZE << order) && order < MAX_ORDER)
			order++;

		err = vmlfb_alloc_vram_area(va, order, 0);

		if (err)
			break;

		if (i == 0) {
			vinfo->vram_start = va->phys;
			vinfo->vram_logical = (void __iomem *) va->logical;
			vinfo->vram_contig_size = va->size;
			vinfo->num_areas = 1;
		} else {
			contiguous = 0;

			for (j = 0; j < i; ++j) {
				va2 = &vinfo->vram[j];
				if (va->phys + va->size == va2->phys ||
				    va2->phys + va2->size == va->phys) {
					contiguous = 1;
					break;
				}
			}

			if (contiguous) {
				vinfo->num_areas++;
				if (va->phys < vinfo->vram_start) {
					vinfo->vram_start = va->phys;
					vinfo->vram_logical =
						(void __iomem *)va->logical;
				}
				vinfo->vram_contig_size += va->size;
			} else {
				vmlfb_free_vram_area(va);
				break;
			}
		}

		if (requested < va->size)
			break;
		else
			requested -= va->size;
	}

	if (vinfo->vram_contig_size > min_total &&
	    vinfo->vram_contig_size > min_contig) {

		printk(KERN_DEBUG MODULE_NAME
		       ": Contiguous vram: %ld bytes at physical 0x%08lx.\n",
		       (unsigned long)vinfo->vram_contig_size,
		       (unsigned long)vinfo->vram_start);

		return 0;
	}

	printk(KERN_ERR MODULE_NAME
	       ": Could not allocate requested minimal amount of vram.\n");

	vmlfb_free_vram(vinfo);

	return -ENOMEM;
}

/*
 * Find the GPU to use with our display controller.
 */

static int vmlfb_get_gpu(struct vml_par *par)
{
	mutex_lock(&vml_mutex);

	par->gpu = pci_get_device(PCI_VENDOR_ID_INTEL, VML_DEVICE_GPU, NULL);

	if (!par->gpu) {
		mutex_unlock(&vml_mutex);
		return -ENODEV;
	}

	mutex_unlock(&vml_mutex);

	if (pci_enable_device(par->gpu) < 0)
		return -ENODEV;

	return 0;
}

/*
 * Find a contiguous vram area that contains a given offset from vram start.
 */
static int vmlfb_vram_offset(struct vml_info *vinfo, unsigned long offset)
{
	unsigned long aoffset;
	unsigned i;

	for (i = 0; i < vinfo->num_areas; ++i) {
		aoffset = offset - (vinfo->vram[i].phys - vinfo->vram_start);

		if (aoffset < vinfo->vram[i].size) {
			return 0;
		}
	}

	return -EINVAL;
}

/*
 * Remap the MMIO register spaces of the VDC and the GPU.
 */

static int vmlfb_enable_mmio(struct vml_par *par)
{
	int err;

	par->vdc_mem_base = pci_resource_start(par->vdc, 0);
	par->vdc_mem_size = pci_resource_len(par->vdc, 0);
	if (!request_mem_region(par->vdc_mem_base, par->vdc_mem_size, "vmlfb")) {
		printk(KERN_ERR MODULE_NAME
		       ": Could not claim display controller MMIO.\n");
		return -EBUSY;
	}
	par->vdc_mem = ioremap(par->vdc_mem_base, par->vdc_mem_size);
	if (par->vdc_mem == NULL) {
		printk(KERN_ERR MODULE_NAME
		       ": Could not map display controller MMIO.\n");
		err = -ENOMEM;
		goto out_err_0;
	}

	par->gpu_mem_base = pci_resource_start(par->gpu, 0);
	par->gpu_mem_size = pci_resource_len(par->gpu, 0);
	if (!request_mem_region(par->gpu_mem_base, par->gpu_mem_size, "vmlfb")) {
		printk(KERN_ERR MODULE_NAME ": Could not claim GPU MMIO.\n");
		err = -EBUSY;
		goto out_err_1;
	}
	par->gpu_mem = ioremap(par->gpu_mem_base, par->gpu_mem_size);
	if (par->gpu_mem == NULL) {
		printk(KERN_ERR MODULE_NAME ": Could not map GPU MMIO.\n");
		err = -ENOMEM;
		goto out_err_2;
	}

	return 0;

out_err_2:
	release_mem_region(par->gpu_mem_base, par->gpu_mem_size);
out_err_1:
	iounmap(par->vdc_mem);
out_err_0:
	release_mem_region(par->vdc_mem_base, par->vdc_mem_size);
	return err;
}

/*
 * Unmap the VDC and GPU register spaces.
 */

static void vmlfb_disable_mmio(struct vml_par *par)
{
	iounmap(par->gpu_mem);
	release_mem_region(par->gpu_mem_base, par->gpu_mem_size);
	iounmap(par->vdc_mem);
	release_mem_region(par->vdc_mem_base, par->vdc_mem_size);
}

/*
 * Release and uninit the VDC and GPU.
 */

static void vmlfb_release_devices(struct vml_par *par)
{
	if (atomic_dec_and_test(&par->refcount)) {
		pci_disable_device(par->gpu);
		pci_disable_device(par->vdc);
	}
}

/*
 * Free up allocated resources for a device.
 */

static void vml_pci_remove(struct pci_dev *dev)
{
	struct fb_info *info;
	struct vml_info *vinfo;
	struct vml_par *par;

	info = pci_get_drvdata(dev);
	if (info) {
		vinfo = container_of(info, struct vml_info, info);
		par = vinfo->par;
		mutex_lock(&vml_mutex);
		unregister_framebuffer(info);
		fb_dealloc_cmap(&info->cmap);
		vmlfb_free_vram(vinfo);
		vmlfb_disable_mmio(par);
		vmlfb_release_devices(par);
		kfree(vinfo);
		kfree(par);
		mutex_unlock(&vml_mutex);
	}
}

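/*
 * Fill in the preferred RGB(A) field layout: ARGB1555 for 16 bpp and
 * xRGB8888 for 32 bpp.
 */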
static void vmlfb_set_pref_pixel_format(struct fb_var_screeninfo *var)
{
	switch (var->bits_per_pixel) {
	case 16:
		var->blue.offset = 0;
		var->blue.length = 5;
		var->green.offset = 5;
		var->green.length = 5;
		var->red.offset = 10;
		var->red.length = 5;
		var->transp.offset = 15;
		var->transp.length = 1;
		break;
	case 32:
		var->blue.offset = 0;
		var->blue.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->red.offset = 16;
		var->red.length = 8;
		var->transp.offset = 24;
		var->transp.length = 0;
		break;
	default:
		break;
	}

	var->blue.msb_right = var->green.msb_right =
	    var->red.msb_right = var->transp.msb_right = 0;
}

/*
 * Device initialization.
 * We initialize one vml_par struct per device and one vml_info
 * struct per pipe. Currently we have only one pipe.
 */

static int vml_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vml_info *vinfo;
	struct fb_info *info;
	struct vml_par *par;
	int err = 0;

	par = kzalloc(sizeof(*par), GFP_KERNEL);
	if (par == NULL)
		return -ENOMEM;

	vinfo = kzalloc(sizeof(*vinfo), GFP_KERNEL);
	if (vinfo == NULL) {
		err = -ENOMEM;
		goto out_err_0;
	}

	vinfo->par = par;
	par->vdc = dev;
	atomic_set(&par->refcount, 1);

	switch (id->device) {
	case VML_DEVICE_VDC:
		if ((err = vmlfb_get_gpu(par)))
			goto out_err_1;
		pci_set_drvdata(dev, &vinfo->info);
		break;
	default:
		err = -ENODEV;
		goto out_err_1;
	}

	info = &vinfo->info;
	info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK;

	err = vmlfb_enable_mmio(par);
	if (err)
		goto out_err_2;

	err = vmlfb_alloc_vram(vinfo, vml_mem_requested,
			       vml_mem_contig, vml_mem_min);
	if (err)
		goto out_err_3;

	strcpy(info->fix.id, "Vermilion Range");
	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;
	info->fix.smem_start = vinfo->vram_start;
	info->fix.smem_len = vinfo->vram_contig_size;
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.ypanstep = 1;
	info->fix.xpanstep = 1;
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->screen_base = vinfo->vram_logical;
	info->pseudo_palette = vinfo->pseudo_palette;
	info->par = par;
	info->fbops = &vmlfb_ops;
	info->device = &dev->dev;

	INIT_LIST_HEAD(&vinfo->head);
	vinfo->pipe_disabled = 1;
	vinfo->cur_blank_mode = FB_BLANK_UNBLANK;

	info->var.grayscale = 0;
	info->var.bits_per_pixel = 16;
	vmlfb_set_pref_pixel_format(&info->var);

	if (!fb_find_mode
	    (&info->var, info, vml_default_mode, NULL, 0, &defaultmode, 16)) {
		printk(KERN_ERR MODULE_NAME ": Could not find initial mode\n");
	}

	if (fb_alloc_cmap(&info->cmap, 256, 1) < 0) {
		err = -ENOMEM;
		goto out_err_4;
	}

	err = register_framebuffer(info);
	if (err) {
		printk(KERN_ERR MODULE_NAME ": Register framebuffer error.\n");
		goto out_err_5;
	}

	printk(KERN_INFO "Initialized vmlfb\n");

	return 0;

out_err_5:
	fb_dealloc_cmap(&info->cmap);
out_err_4:
	vmlfb_free_vram(vinfo);
out_err_3:
	vmlfb_disable_mmio(par);
out_err_2:
	vmlfb_release_devices(par);
out_err_1:
	kfree(vinfo);
out_err_0:
	kfree(par);
	return err;
}

static int vmlfb_open(struct fb_info *info, int user)
{
	/*
	 * Save registers here?
	 */
	return 0;
}

static int vmlfb_release(struct fb_info *info, int user)
{
	/*
	 * Restore registers here.
	 */

	return 0;
}

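/*
 * Return the supported pixel clock, in kHz, closest to the requested one.
 */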
static int vml_nearest_clock(int clock)
{

	int i;
	int cur_index;
	int cur_diff;
	int diff;

	cur_index = 0;
	cur_diff = clock - vml_clocks[0];
	cur_diff = (cur_diff < 0) ? -cur_diff : cur_diff;
	for (i = 1; i < vml_num_clocks; ++i) {
		diff = clock - vml_clocks[i];
		diff = (diff < 0) ? -diff : diff;
		if (diff < cur_diff) {
			cur_index = i;
			cur_diff = diff;
		}
	}
	return vml_clocks[cur_index];
}

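/*
 * Validate a requested mode against the supported pixel clocks, the
 * resolution limits and the available vram, adjusting the pixel format
 * where possible. Caller must hold vml_mutex.
 */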
static int vmlfb_check_var_locked(struct fb_var_screeninfo *var,
				  struct vml_info *vinfo)
{
	u32 pitch;
	u64 mem;
	int nearest_clock;
	int clock;
	int clock_diff;
	struct fb_var_screeninfo v;

	v = *var;
	clock = PICOS2KHZ(var->pixclock);

	if (subsys && subsys->nearest_clock) {
		nearest_clock = subsys->nearest_clock(subsys, clock);
	} else {
		nearest_clock = vml_nearest_clock(clock);
	}

	/*
	 * Accept a 20% diff.
	 */

	clock_diff = nearest_clock - clock;
	clock_diff = (clock_diff < 0) ? -clock_diff : clock_diff;
	if (clock_diff > clock / 5) {
#if 0
		printk(KERN_DEBUG MODULE_NAME ": Diff failure. %d %d\n",clock_diff,clock);
#endif
		return -EINVAL;
	}

	v.pixclock = KHZ2PICOS(nearest_clock);

	if (var->xres > VML_MAX_XRES || var->yres > VML_MAX_YRES) {
		printk(KERN_DEBUG MODULE_NAME ": Resolution failure.\n");
		return -EINVAL;
	}
	if (var->xres_virtual > VML_MAX_XRES_VIRTUAL) {
		printk(KERN_DEBUG MODULE_NAME
		       ": Virtual resolution failure.\n");
		return -EINVAL;
	}
	switch (v.bits_per_pixel) {
	case 0 ... 16:
		v.bits_per_pixel = 16;
		break;
	case 17 ... 32:
		v.bits_per_pixel = 32;
		break;
	default:
		printk(KERN_DEBUG MODULE_NAME ": Invalid bpp: %d.\n",
		       var->bits_per_pixel);
		return -EINVAL;
	}

	pitch = ALIGN((var->xres * var->bits_per_pixel) >> 3, 0x40);
	mem = (u64)pitch * var->yres_virtual;
	if (mem > vinfo->vram_contig_size) {
		return -ENOMEM;
	}

	switch (v.bits_per_pixel) {
	case 16:
		if (var->blue.offset != 0 ||
		    var->blue.length != 5 ||
		    var->green.offset != 5 ||
		    var->green.length != 5 ||
		    var->red.offset != 10 ||
		    var->red.length != 5 ||
		    var->transp.offset != 15 || var->transp.length != 1) {
			vmlfb_set_pref_pixel_format(&v);
		}
		break;
	case 32:
		if (var->blue.offset != 0 ||
		    var->blue.length != 8 ||
		    var->green.offset != 8 ||
		    var->green.length != 8 ||
		    var->red.offset != 16 ||
		    var->red.length != 8 ||
		    (var->transp.length != 0 && var->transp.length != 8) ||
		    (var->transp.length == 8 && var->transp.offset != 24)) {
			vmlfb_set_pref_pixel_format(&v);
		}
		break;
	default:
		return -EINVAL;
	}

	*var = v;

	return 0;
}

static int vmlfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	int ret;

	mutex_lock(&vml_mutex);
	ret = vmlfb_check_var_locked(var, vinfo);
	mutex_unlock(&vml_mutex);

	return ret;
}

static void vml_wait_vblank(struct vml_info *vinfo)
{
	/* Wait for vblank. For now, just wait for a 50Hz cycle (20ms) */
	mdelay(20);
}

static void vmlfb_disable_pipe(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;

	/* Disable the MDVO pad */
	VML_WRITE32(par, VML_RCOMPSTAT, 0);
	while (!(VML_READ32(par, VML_RCOMPSTAT) & VML_MDVO_VDC_I_RCOMP)) ;

	/* Disable display planes */
	VML_WRITE32(par, VML_DSPCCNTR,
		    VML_READ32(par, VML_DSPCCNTR) & ~VML_GFX_ENABLE);
	(void)VML_READ32(par, VML_DSPCCNTR);
	/* Wait for vblank for the disable to take effect */
	vml_wait_vblank(vinfo);

	/* Next, disable display pipes */
	VML_WRITE32(par, VML_PIPEACONF, 0);
	(void)VML_READ32(par, VML_PIPEACONF);

	vinfo->pipe_disabled = 1;
}

#ifdef VERMILION_DEBUG
static void vml_dump_regs(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;

	printk(KERN_DEBUG MODULE_NAME ": Modesetting register dump:\n");
	printk(KERN_DEBUG MODULE_NAME ": \tHTOTAL_A         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HTOTAL_A));
	printk(KERN_DEBUG MODULE_NAME ": \tHBLANK_A         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HBLANK_A));
	printk(KERN_DEBUG MODULE_NAME ": \tHSYNC_A          : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HSYNC_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVTOTAL_A         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VTOTAL_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVBLANK_A         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VBLANK_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVSYNC_A          : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VSYNC_A));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCSTRIDE       : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCSTRIDE));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCSIZE         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCSIZE));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCPOS          : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCPOS));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPARB           : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPARB));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCADDR         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCADDR));
	printk(KERN_DEBUG MODULE_NAME ": \tBCLRPAT_A        : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_BCLRPAT_A));
	printk(KERN_DEBUG MODULE_NAME ": \tCANVSCLR_A       : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_CANVSCLR_A));
	printk(KERN_DEBUG MODULE_NAME ": \tPIPEASRC         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_PIPEASRC));
	printk(KERN_DEBUG MODULE_NAME ": \tPIPEACONF        : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_PIPEACONF));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCCNTR         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCCNTR));
	printk(KERN_DEBUG MODULE_NAME ": \tRCOMPSTAT        : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_RCOMPSTAT));
	printk(KERN_DEBUG MODULE_NAME ": End of modesetting register dump.\n");
}
#endif

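/*
 * Program the current mode from info->var: compute timings, snap the pixel
 * clock and write the VDC registers. If no subsystem is registered yet,
 * only the stride bookkeeping is updated. Caller must hold vml_mutex.
 */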
static int vmlfb_set_par_locked(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;
	struct fb_info *info = &vinfo->info;
	struct fb_var_screeninfo *var = &info->var;
	u32 htotal, hactive, hblank_start, hblank_end, hsync_start, hsync_end;
	u32 vtotal, vactive, vblank_start, vblank_end, vsync_start, vsync_end;
	u32 dspcntr;
	int clock;

	vinfo->bytes_per_pixel = var->bits_per_pixel >> 3;
	vinfo->stride = ALIGN(var->xres_virtual * vinfo->bytes_per_pixel, 0x40);
	info->fix.line_length = vinfo->stride;

	if (!subsys)
		return 0;

	htotal =
	    var->xres + var->right_margin + var->hsync_len + var->left_margin;
	hactive = var->xres;
	hblank_start = var->xres;
	hblank_end = htotal;
	hsync_start = hactive + var->right_margin;
	hsync_end = hsync_start + var->hsync_len;

	vtotal =
	    var->yres + var->lower_margin + var->vsync_len + var->upper_margin;
	vactive = var->yres;
	vblank_start = var->yres;
	vblank_end = vtotal;
	vsync_start = vactive + var->lower_margin;
	vsync_end = vsync_start + var->vsync_len;

	dspcntr = VML_GFX_ENABLE | VML_GFX_GAMMABYPASS;
	clock = PICOS2KHZ(var->pixclock);

	if (subsys->nearest_clock) {
		clock = subsys->nearest_clock(subsys, clock);
	} else {
		clock = vml_nearest_clock(clock);
	}
	printk(KERN_DEBUG MODULE_NAME
	       ": Set mode Hfreq : %d kHz, Vfreq : %d Hz.\n", clock / htotal,
	       ((clock / htotal) * 1000) / vtotal);

	switch (var->bits_per_pixel) {
	case 16:
		dspcntr |= VML_GFX_ARGB1555;
		break;
	case 32:
		if (var->transp.length == 8)
			dspcntr |= VML_GFX_ARGB8888 | VML_GFX_ALPHAMULT;
		else
			dspcntr |= VML_GFX_RGB0888;
		break;
	default:
		return -EINVAL;
	}

	vmlfb_disable_pipe(vinfo);
	mb();

	if (subsys->set_clock)
		subsys->set_clock(subsys, clock);
	else
		return -EINVAL;

	VML_WRITE32(par, VML_HTOTAL_A, ((htotal - 1) << 16) | (hactive - 1));
	VML_WRITE32(par, VML_HBLANK_A,
		    ((hblank_end - 1) << 16) | (hblank_start - 1));
	VML_WRITE32(par, VML_HSYNC_A,
		    ((hsync_end - 1) << 16) | (hsync_start - 1));
	VML_WRITE32(par, VML_VTOTAL_A, ((vtotal - 1) << 16) | (vactive - 1));
	VML_WRITE32(par, VML_VBLANK_A,
		    ((vblank_end - 1) << 16) | (vblank_start - 1));
	VML_WRITE32(par, VML_VSYNC_A,
		    ((vsync_end - 1) << 16) | (vsync_start - 1));
	VML_WRITE32(par, VML_DSPCSTRIDE, vinfo->stride);
	VML_WRITE32(par, VML_DSPCSIZE,
		    ((var->yres - 1) << 16) | (var->xres - 1));
	VML_WRITE32(par, VML_DSPCPOS, 0x00000000);
	VML_WRITE32(par, VML_DSPARB, VML_FIFO_DEFAULT);
	VML_WRITE32(par, VML_BCLRPAT_A, 0x00000000);
	VML_WRITE32(par, VML_CANVSCLR_A, 0x00000000);
	VML_WRITE32(par, VML_PIPEASRC,
		    ((var->xres - 1) << 16) | (var->yres - 1));

	wmb();
	VML_WRITE32(par, VML_PIPEACONF, VML_PIPE_ENABLE);
	wmb();
	VML_WRITE32(par, VML_DSPCCNTR, dspcntr);
	wmb();
	VML_WRITE32(par, VML_DSPCADDR, (u32) vinfo->vram_start +
		    var->yoffset * vinfo->stride +
		    var->xoffset * vinfo->bytes_per_pixel);

	VML_WRITE32(par, VML_RCOMPSTAT, VML_MDVO_PAD_ENABLE);

	while (!(VML_READ32(par, VML_RCOMPSTAT) &
		 (VML_MDVO_VDC_I_RCOMP | VML_MDVO_PAD_ENABLE))) ;

	vinfo->pipe_disabled = 0;
#ifdef VERMILION_DEBUG
	vml_dump_regs(vinfo);
#endif

	return 0;
}

static int vmlfb_set_par(struct fb_info *info)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	int ret;

	mutex_lock(&vml_mutex);
	list_move(&vinfo->head, (subsys) ? &global_has_mode : &global_no_mode);
	ret = vmlfb_set_par_locked(vinfo);

	mutex_unlock(&vml_mutex);
	return ret;
}

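/*
 * Apply the blanking mode stored in vinfo->cur_blank_mode.
 * Caller must hold vml_mutex.
 */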
static int vmlfb_blank_locked(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;
	u32 cur = VML_READ32(par, VML_PIPEACONF);

	switch (vinfo->cur_blank_mode) {
	case FB_BLANK_UNBLANK:
		if (vinfo->pipe_disabled) {
			vmlfb_set_par_locked(vinfo);
		}
		VML_WRITE32(par, VML_PIPEACONF, cur & ~VML_PIPE_FORCE_BORDER);
		(void)VML_READ32(par, VML_PIPEACONF);
		break;
	case FB_BLANK_NORMAL:
		if (vinfo->pipe_disabled) {
			vmlfb_set_par_locked(vinfo);
		}
		VML_WRITE32(par, VML_PIPEACONF, cur | VML_PIPE_FORCE_BORDER);
		(void)VML_READ32(par, VML_PIPEACONF);
		break;
	case FB_BLANK_VSYNC_SUSPEND:
	case FB_BLANK_HSYNC_SUSPEND:
		if (!vinfo->pipe_disabled) {
			vmlfb_disable_pipe(vinfo);
		}
		break;
	case FB_BLANK_POWERDOWN:
		if (!vinfo->pipe_disabled) {
			vmlfb_disable_pipe(vinfo);
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int vmlfb_blank(int blank_mode, struct fb_info *info)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	int ret;

	mutex_lock(&vml_mutex);
	vinfo->cur_blank_mode = blank_mode;
	ret = vmlfb_blank_locked(vinfo);
	mutex_unlock(&vml_mutex);
	return ret;
}

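/*
 * Pan the display by reprogramming the scanout start address (DSPCADDR).
 */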
static int vmlfb_pan_display(struct fb_var_screeninfo *var,
			     struct fb_info *info)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	struct vml_par *par = vinfo->par;

	mutex_lock(&vml_mutex);
	VML_WRITE32(par, VML_DSPCADDR, (u32) vinfo->vram_start +
		    var->yoffset * vinfo->stride +
		    var->xoffset * vinfo->bytes_per_pixel);
	(void)VML_READ32(par, VML_DSPCADDR);
	mutex_unlock(&vml_mutex);

	return 0;
}

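/*
 * Fill one pseudo-palette entry for truecolor visuals.
 */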
static int vmlfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			   u_int transp, struct fb_info *info)
{
	u32 v;

	if (regno >= 16)
		return -EINVAL;

	if (info->var.grayscale) {
		red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
	}

	if (info->fix.visual != FB_VISUAL_TRUECOLOR)
		return -EINVAL;

	red = VML_TOHW(red, info->var.red.length);
	blue = VML_TOHW(blue, info->var.blue.length);
	green = VML_TOHW(green, info->var.green.length);
	transp = VML_TOHW(transp, info->var.transp.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset) |
	    (transp << info->var.transp.offset);

	switch (info->var.bits_per_pixel) {
	case 16:
		((u32 *) info->pseudo_palette)[regno] = v;
		break;
	case 24:
	case 32:
		((u32 *) info->pseudo_palette)[regno] = v;
		break;
	}
	return 0;
}

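/*
 * Map the contiguous vram area into user space with uncached-minus
 * page protection.
 */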
static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int ret;
	unsigned long prot;

	ret = vmlfb_vram_offset(vinfo, offset);
	if (ret)
		return -EINVAL;

	prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK;
	pgprot_val(vma->vm_page_prot) =
		prot | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);

	return vm_iomap_memory(vma, vinfo->vram_start,
			vinfo->vram_contig_size);
}

static int vmlfb_sync(struct fb_info *info)
{
	return 0;
}

static int vmlfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	return -EINVAL;	/* just to force soft_cursor() call */
}

static struct fb_ops vmlfb_ops = {
	.owner = THIS_MODULE,
	.fb_open = vmlfb_open,
	.fb_release = vmlfb_release,
	.fb_check_var = vmlfb_check_var,
	.fb_set_par = vmlfb_set_par,
	.fb_blank = vmlfb_blank,
	.fb_pan_display = vmlfb_pan_display,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_cursor = vmlfb_cursor,
	.fb_sync = vmlfb_sync,
	.fb_mmap = vmlfb_mmap,
	.fb_setcolreg = vmlfb_setcolreg
};

static const struct pci_device_id vml_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, VML_DEVICE_VDC)},
	{0}
};

static struct pci_driver vmlfb_pci_driver = {
	.name = "vmlfb",
	.id_table = vml_ids,
	.probe = vml_pci_probe,
	.remove = vml_pci_remove,
};

static void __exit vmlfb_cleanup(void)
{
	pci_unregister_driver(&vmlfb_pci_driver);
}

static int __init vmlfb_init(void)
{

#ifndef MODULE
	char *option = NULL;

	if (fb_get_options(MODULE_NAME, &option))
		return -ENODEV;
#endif

	printk(KERN_DEBUG MODULE_NAME ": initializing\n");
	mutex_init(&vml_mutex);
	INIT_LIST_HEAD(&global_no_mode);
	INIT_LIST_HEAD(&global_has_mode);

	return pci_register_driver(&vmlfb_pci_driver);
}

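/*
 * Register a system-specific clock/PLL backend with this driver. A companion
 * driver fills in a struct vml_sys (see vermilion.h) with its save, restore,
 * set_clock and nearest_clock callbacks and calls vmlfb_register_subsys().
 * A minimal, purely illustrative sketch (the carillo_* names below are
 * hypothetical and not part of this file):
 *
 *	static struct vml_sys carillo_sys = {
 *		.name = "Carillo Ranch",
 *		.save = carillo_save,
 *		.restore = carillo_restore,
 *		.set_clock = carillo_set_clock,
 *		.nearest_clock = carillo_nearest_clock,
 *	};
 *
 *	err = vmlfb_register_subsys(&carillo_sys);
 *	...
 *	vmlfb_unregister_subsys(&carillo_sys);
 */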
int vmlfb_register_subsys(struct vml_sys *sys)
{
	struct vml_info *entry;
	struct list_head *list;
	u32 save_activate;

	mutex_lock(&vml_mutex);
	if (subsys != NULL) {
		subsys->restore(subsys);
	}
	subsys = sys;
	subsys->save(subsys);

	/*
	 * We need to restart list traversal for each item, since we
	 * release the list mutex in the loop.
	 */

	list = global_no_mode.next;
	while (list != &global_no_mode) {
		list_del_init(list);
		entry = list_entry(list, struct vml_info, head);

		/*
		 * First, try the current mode which might not be
		 * completely validated with respect to the pixel clock.
		 */

		if (!vmlfb_check_var_locked(&entry->info.var, entry)) {
			vmlfb_set_par_locked(entry);
			list_add_tail(list, &global_has_mode);
		} else {

			/*
			 * Didn't work. Try to find another mode
			 * that matches this subsys.
			 */

			mutex_unlock(&vml_mutex);
			save_activate = entry->info.var.activate;
			entry->info.var.bits_per_pixel = 16;
			vmlfb_set_pref_pixel_format(&entry->info.var);
			if (fb_find_mode(&entry->info.var,
					 &entry->info,
					 vml_default_mode, NULL, 0, NULL, 16)) {
				entry->info.var.activate |=
				    FB_ACTIVATE_FORCE | FB_ACTIVATE_NOW;
				fb_set_var(&entry->info, &entry->info.var);
			} else {
				printk(KERN_ERR MODULE_NAME
				       ": Sorry, no mode found for this subsys.\n");
			}
			entry->info.var.activate = save_activate;
			mutex_lock(&vml_mutex);
		}
		vmlfb_blank_locked(entry);
		list = global_no_mode.next;
	}
	mutex_unlock(&vml_mutex);

	printk(KERN_DEBUG MODULE_NAME ": Registered %s subsystem.\n",
				subsys->name ? subsys->name : "unknown");
	return 0;
}

EXPORT_SYMBOL_GPL(vmlfb_register_subsys);

void vmlfb_unregister_subsys(struct vml_sys *sys)
{
	struct vml_info *entry, *next;

	mutex_lock(&vml_mutex);
	if (subsys != sys) {
		mutex_unlock(&vml_mutex);
		return;
	}
	subsys->restore(subsys);
	subsys = NULL;
	list_for_each_entry_safe(entry, next, &global_has_mode, head) {
		printk(KERN_DEBUG MODULE_NAME ": subsys disable pipe\n");
		vmlfb_disable_pipe(entry);
		list_move_tail(&entry->head, &global_no_mode);
	}
	mutex_unlock(&vml_mutex);
}

EXPORT_SYMBOL_GPL(vmlfb_unregister_subsys);

module_init(vmlfb_init);
module_exit(vmlfb_cleanup);

MODULE_AUTHOR("Tungsten Graphics");
MODULE_DESCRIPTION("Initialization of the Vermilion display devices");
MODULE_VERSION("1.0.0");
MODULE_LICENSE("GPL");