/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/fb.h>
#include <linux/pci.h>

#include <drm/drm_fourcc.h>
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

/* Coalesce dirty-region flushes to at most ~30 updates per second. */
#define VMW_DIRTY_DELAY (HZ / 30)

struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct mutex bo_mutex;
	struct vmw_buffer_object *vmw_bo;
	unsigned bo_size;
	struct drm_framebuffer *set_fb;
	struct drm_display_mode *set_mode;
	u32 fb_x;
	u32 fb_y;

	u32 pseudo_palette[17];

	unsigned max_width;
	unsigned max_height;

	/* Coalesced dirty rectangle, protected by @lock. */
	struct {
		spinlock_t lock;
		bool active;
		u32 x1;
		u32 y1;
		u32 x2;
		u32 y2;
	} dirty;

	struct drm_crtc *crtc;
	struct drm_connector *con;
	struct delayed_work local_work;
};

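/*
 * fb_setcolreg hook: only a truecolor pseudo-palette is supported, so the
 * 16-bit fbdev color components are packed into an XRGB8888 word that the
 * console drawing routines read from info->pseudo_palette.
 */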
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->set_fb->format->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n",
			  par->set_fb->format->depth,
			  par->set_fb->format->cpp[0] * 8);
		return 1;
	}

	return 0;
}

static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel/8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}

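/*
 * fb_blank hook: blanking is a no-op here; the virtual display has no
 * power states to switch, so just report success.
 */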
static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/**
 * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
 *
 * @work: The struct work_struct associated with this task.
 *
 * This function flushes the dirty regions of the vmalloc framebuffer to the
 * kms framebuffer, and if the kms framebuffer is visible, also updates the
 * corresponding displays. Note that this function runs even if the kms
 * framebuffer is not bound to a crtc and thus not visible, but it's turned
 * off during hibernation using the par->dirty.active bool.
 */
static void vmw_fb_dirty_flush(struct work_struct *work)
{
	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
					      local_work.work);
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	unsigned long irq_flags;
	s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
	u32 cpp, max_x, max_y;
	struct drm_clip_rect clip;
	struct drm_framebuffer *cur_fb;
	u8 *src_ptr, *dst_ptr;
	struct vmw_buffer_object *vbo = par->vmw_bo;
	void *virtual;

	if (!READ_ONCE(par->dirty.active))
		return;

	mutex_lock(&par->bo_mutex);
	cur_fb = par->set_fb;
	if (!cur_fb)
		goto out_unlock;

	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
	virtual = vmw_bo_map_and_cache(vbo);
	if (!virtual)
		goto out_unreserve;

	spin_lock_irqsave(&par->dirty.lock, irq_flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
		goto out_unreserve;
	}

	/*
	 * Handle panning when copying from vmalloc to framebuffer.
	 * Clip dirty area to framebuffer.
	 */
	cpp = cur_fb->format->cpp[0];
	max_x = par->fb_x + cur_fb->width;
	max_y = par->fb_y + cur_fb->height;

	dst_x1 = par->dirty.x1 - par->fb_x;
	dst_y1 = par->dirty.y1 - par->fb_y;
	dst_x1 = max_t(s32, dst_x1, 0);
	dst_y1 = max_t(s32, dst_y1, 0);

	dst_x2 = par->dirty.x2 - par->fb_x;
	dst_y2 = par->dirty.y2 - par->fb_y;
	dst_x2 = min_t(s32, dst_x2, max_x);
	dst_y2 = min_t(s32, dst_y2, max_y);
	w = dst_x2 - dst_x1;
	h = dst_y2 - dst_y1;
	w = max_t(s32, 0, w);
	h = max_t(s32, 0, h);

	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

	if (w && h) {
		dst_ptr = (u8 *)virtual +
			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
		src_ptr = (u8 *)par->vmalloc +
			((dst_y1 + par->fb_y) * info->fix.line_length +
			 (dst_x1 + par->fb_x) * cpp);

		/* Copy the dirty rows one scanline at a time; pitches may differ. */
		while (h-- > 0) {
			memcpy(dst_ptr, src_ptr, w*cpp);
			dst_ptr += par->set_fb->pitches[0];
			src_ptr += info->fix.line_length;
		}

		clip.x1 = dst_x1;
		clip.x2 = dst_x2;
		clip.y1 = dst_y1;
		clip.y2 = dst_y2;
	}

out_unreserve:
	ttm_bo_unreserve(&vbo->base);
	if (w && h) {
		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
						       &clip, 1));
		vmw_cmd_flush(vmw_priv, false);
	}
out_unlock:
	mutex_unlock(&par->bo_mutex);
}

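/*
 * Grow the coalesced dirty rectangle to include the given region. If the
 * rectangle was empty, this is new dirt and the delayed flush worker is
 * scheduled; otherwise a flush is already pending and only the bounds grow.
 */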
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If we are active, start the dirty work;
		 * we share the work with the defio system. */
		if (par->dirty.active)
			schedule_delayed_work(&par->local_work,
					      VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

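/*
 * fb_pan_display hook: panning is applied during the copy from the shadow
 * buffer in vmw_fb_dirty_flush(), so just record the new offsets and mark
 * the whole framebuffer dirty.
 */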
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;

	if ((var->xoffset + var->xres) > var->xres_virtual ||
	    (var->yoffset + var->yres) > var->yres_virtual) {
		DRM_ERROR("Requested panning cannot fit in framebuffer\n");
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;
	if (par->set_fb)
		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
				  par->set_fb->height);
	mutex_unlock(&par->bo_mutex);

	return 0;
}

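/*
 * Deferred I/O callback: invoked by the fbdev core with the list of pages
 * that userspace touched through mmap. The page offsets are converted to a
 * full-width span of dirty scanlines before the flush worker is rescheduled.
 */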
static void vmw_deferred_io(struct fb_info *info, struct list_head *pagereflist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct fb_deferred_io_pageref *pageref;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(pageref, pagereflist, list) {
		start = pageref->offset;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);

		/*
		 * Since we've already waited on this work once, try to
		 * execute asap.
		 */
		cancel_delayed_work(&par->local_work);
		schedule_delayed_work(&par->local_work, 0);
	}
}

static struct fb_deferred_io vmw_defio = {
	.delay = VMW_DIRTY_DELAY,
	.deferred_io = vmw_deferred_io,
};

/*
 * Draw code
 *
 * The cfb_* helpers draw into the vmalloc shadow buffer; each wrapper then
 * marks the touched region dirty so it gets flushed to the device.
 */

static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

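/*
 * Helper to allocate the buffer object that backs the fbdev KMS
 * framebuffer, created in system placement.
 */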
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_buffer_object **out)
{
	struct vmw_buffer_object *vmw_bo;
	int ret;

	ret = vmw_bo_create(vmw_priv, size,
			    &vmw_sys_placement,
			    false, false,
			    &vmw_bo_bo_free, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	*out = vmw_bo;

	return ret;
}

static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
				int *depth)
{
	switch (var->bits_per_pixel) {
	case 32:
		*depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	return 0;
}

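/*
 * Call the crtc's set_config hook under a modeset acquire context, backing
 * off and retrying for as long as the locking step reports -EDEADLK.
 */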
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
	struct drm_crtc *crtc = set->crtc;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

restart:
	ret = crtc->funcs->set_config(set, &ctx);

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto restart;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static int vmw_fb_kms_detach(struct vmw_fb_par *par,
			     bool detach_bo,
			     bool unref_bo)
{
	struct drm_framebuffer *cur_fb = par->set_fb;
	int ret;

	/* Detach the KMS framebuffer from crtcs */
	if (par->set_mode) {
		struct drm_mode_set set;

		set.crtc = par->crtc;
		set.x = 0;
		set.y = 0;
		set.mode = NULL;
		set.fb = NULL;
		set.num_connectors = 0;
		set.connectors = &par->con;
		ret = vmwgfx_set_config_internal(&set);
		if (ret) {
			DRM_ERROR("Could not unset a mode.\n");
			return ret;
		}
		drm_mode_destroy(&par->vmw_priv->drm, par->set_mode);
		par->set_mode = NULL;
	}

	if (cur_fb) {
		drm_framebuffer_put(cur_fb);
		par->set_fb = NULL;
	}

	if (par->vmw_bo && detach_bo && unref_bo)
		vmw_bo_unreference(&par->vmw_bo);

	return 0;
}

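/*
 * (Re)create the KMS framebuffer backing the fbdev emulation. If the
 * current framebuffer already matches the requested geometry and format it
 * is kept; otherwise it is torn down, and the backing buffer object is
 * reused unless it is too small or more than twice the required size.
 */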
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
	struct drm_mode_fb_cmd2 mode_cmd = {0};
	struct vmw_fb_par *par = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_framebuffer *cur_fb;
	struct vmw_framebuffer *vfb;
	int ret = 0, depth;
	size_t new_bo_size;

	ret = vmw_fb_compute_depth(var, &depth);
	if (ret)
		return ret;

	mode_cmd.width = var->xres;
	mode_cmd.height = var->yres;
	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
	mode_cmd.pixel_format =
		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

	cur_fb = par->set_fb;
	if (cur_fb && cur_fb->width == mode_cmd.width &&
	    cur_fb->height == mode_cmd.height &&
	    cur_fb->format->format == mode_cmd.pixel_format &&
	    cur_fb->pitches[0] == mode_cmd.pitches[0])
		return 0;

	/* Need a new buffer object? */
	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
	ret = vmw_fb_kms_detach(par,
				par->bo_size < new_bo_size ||
				par->bo_size > 2*new_bo_size,
				true);
	if (ret)
		return ret;

	if (!par->vmw_bo) {
		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
				       &par->vmw_bo);
		if (ret) {
			DRM_ERROR("Failed creating a buffer object for "
				  "fbdev.\n");
			return ret;
		}
		par->bo_size = new_bo_size;
	}

	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
				      true, &mode_cmd);
	if (IS_ERR(vfb))
		return PTR_ERR(vfb);

	par->set_fb = &vfb->base;

	return 0;
}

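/*
 * fb_set_par hook: build a display mode matching info->var, validate it
 * against the available VRAM, swap in a matching KMS framebuffer and
 * program the crtc through vmwgfx_set_config_internal().
 */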
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct drm_mode_set set;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
		DRM_MODE_TYPE_DRIVER,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	struct drm_display_mode *mode;
	int ret;

	mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode);
	if (!mode) {
		DRM_ERROR("Could not create new fb mode.\n");
		return -ENOMEM;
	}

	mode->hdisplay = var->xres;
	mode->vdisplay = var->yres;
	vmw_guess_mode_timing(mode);

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					mode->hdisplay *
					DIV_ROUND_UP(var->bits_per_pixel, 8),
					mode->vdisplay)) {
		drm_mode_destroy(&vmw_priv->drm, mode);
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	ret = vmw_fb_kms_framebuffer(info);
	if (ret)
		goto out_unlock;

	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;

	set.crtc = par->crtc;
	set.x = 0;
	set.y = 0;
	set.mode = mode;
	set.fb = par->set_fb;
	set.num_connectors = 1;
	set.connectors = &par->con;

	ret = vmwgfx_set_config_internal(&set);
	if (ret)
		goto out_unlock;

	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
			  par->set_fb->width, par->set_fb->height);

	/* If there already was stuff dirty we won't
	 * schedule a new work, so let's do it now */
	schedule_delayed_work(&par->local_work, 0);

out_unlock:
	if (par->set_mode)
		drm_mode_destroy(&vmw_priv->drm, par->set_mode);
	par->set_mode = mode;

	mutex_unlock(&par->bo_mutex);

	return ret;
}

static const struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
	.fb_mmap = fb_deferred_io_mmap,
};

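/*
 * Set up the fbdev emulation: allocate the fb_info and the vmalloc shadow
 * buffer, fill in the fixed and variable screen parameters, hook up
 * deferred I/O and register the framebuffer device.
 */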
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = vmw_priv->drm.dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned fb_width, fb_height;
	unsigned int fb_bpp, fb_pitch, fb_size;
	struct drm_display_mode *init_mode;
	int ret;

	fb_bpp = 32;

	/* XXX These limits shouldn't be hardcoded. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	memset(par, 0, sizeof(*par));
	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
	par->vmw_priv = vmw_priv;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
				      par->max_height, &par->con,
				      &par->crtc, &init_mode);
	if (ret)
		goto err_kms;

	info->var.xres = init_mode->hdisplay;
	info->var.yres = init_mode->vdisplay;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vzalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = (char __iomem *)par->vmalloc;
	info->screen_size = fb_size;

	info->fbops = &vmw_fb_ops;

	/* 24-bit depth by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = fb_bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	mutex_init(&par->bo_mutex);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	vmw_fb_set_par(info);

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
err_free:
	vfree(par->vmalloc);
err_kms:
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

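/*
 * Tear down the fbdev emulation: stop deferred I/O and the flush worker,
 * unregister the framebuffer device and release the KMS and buffer-object
 * resources acquired in vmw_fb_init().
 */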
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;

	/* Stop deferred I/O and the flush worker before unregistering. */
	fb_deferred_io_cleanup(info);
	cancel_delayed_work_sync(&par->local_work);
	unregister_framebuffer(info);

	mutex_lock(&par->bo_mutex);
	(void) vmw_fb_kms_detach(par, true, true);
	mutex_unlock(&par->bo_mutex);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

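/*
 * Pause the fbdev emulation (e.g. around hibernation): clear dirty.active
 * so no new flush work is scheduled, then drain any deferred I/O and flush
 * work that is already queued.
 */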
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work(&info->deferred_work);
	flush_delayed_work(&par->local_work);

	return 0;
}

int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	/*
	 * Need to reschedule a dirty update, because otherwise that's
	 * only done in dirty_mark() if the previous coalesced
	 * dirty region was empty.
	 */
	schedule_delayed_work(&par->local_work, 0);

	return 0;
}