2 * Copyright (C) 2015 Etnaviv Project
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
17 #include <linux/component.h>
18 #include <linux/of_platform.h>
19 #include <drm/drm_of.h>
21 #include "etnaviv_cmdbuf.h"
22 #include "etnaviv_drv.h"
23 #include "etnaviv_gpu.h"
24 #include "etnaviv_gem.h"
25 #include "etnaviv_mmu.h"
/*
 * Optional register-access logging, compiled in only when
 * CONFIG_DRM_ETNAVIV_REGISTER_LOGGING is set.  "reglog" is a runtime
 * module parameter (0600: root-only read/write) consumed by the
 * etnaviv_writel()/etnaviv_readl() wrappers below.  The backing bool
 * declaration is not visible in this excerpt.
 */
27 #ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
29 MODULE_PARM_DESC(reglog, "Enable register read/write logging");
30 module_param(reglog, bool, 0600);
/*
 * Map a platform-device MEM resource.  Looks the resource up by @name
 * first, falls back to resource index 0, then maps it with the
 * device-managed devm_ioremap_resource() so the mapping is released
 * automatically on driver detach.  (Excerpt is fragmentary: local
 * declarations, the error-path details and return statements are not
 * visible here.)
 */
35 void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
/* Prefer a resource identified by name... */
42 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
/* ...otherwise fall back to the first MEM resource — presumably when
 * @name is NULL; the guarding condition is outside this excerpt. */
44 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
46 ptr = devm_ioremap_resource(&pdev->dev, res);
48 dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
/* Debug print of the mapped region: tag, kernel virtual address, size. */
54 dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
55 dbgname, ptr, (size_t)resource_size(res));
/* MMIO write wrapper: logs the access at KERN_DEBUG (the actual
 * writel() call and the reglog gate are outside this excerpt). */
60 void etnaviv_writel(u32 data, void __iomem *addr)
63 printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
/* MMIO read wrapper: performs the readl() and logs address/value at
 * KERN_DEBUG (the return statement is outside this excerpt). */
68 u32 etnaviv_readl(const void __iomem *addr)
70 u32 val = readl(addr);
73 printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
/*
 * Initialise every GPU core bound to this DRM device: walk the fixed
 * priv->gpu[] pipe array (ETNA_MAX_PIPES slots) and run
 * etnaviv_gpu_init() on each entry.  NULL-slot skipping and the
 * init-failure handling are not visible in this excerpt.
 */
83 static void load_gpu(struct drm_device *dev)
85 struct etnaviv_drm_private *priv = dev->dev_private;
88 for (i = 0; i < ETNA_MAX_PIPES; i++) {
89 struct etnaviv_gpu *g = priv->gpu[i];
94 ret = etnaviv_gpu_init(g);
/*
 * DRM ->open() hook: allocate a zeroed per-file context and attach it
 * to file->driver_priv.  The kzalloc NULL check / -ENOMEM return is
 * not visible in this excerpt.
 */
101 static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
103 struct etnaviv_file_private *ctx;
105 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
109 file->driver_priv = ctx;
/*
 * DRM ->postclose() hook: for every GPU pipe, clear the GPU's
 * last-context pointer when it refers to the closing file's context.
 * gpu->lock serialises the lastctx access against other users of that
 * field.  The body of the if and the final kfree(ctx) are outside this
 * excerpt.
 */
114 static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
116 struct etnaviv_drm_private *priv = dev->dev_private;
117 struct etnaviv_file_private *ctx = file->driver_priv;
120 for (i = 0; i < ETNA_MAX_PIPES; i++) {
121 struct etnaviv_gpu *gpu = priv->gpu[i];
124 mutex_lock(&gpu->lock);
125 if (gpu->lastctx == ctx)
127 mutex_unlock(&gpu->lock);
/* debugfs helpers — compiled only with CONFIG_DEBUG_FS. */
138 #ifdef CONFIG_DEBUG_FS
/* debugfs: describe all GEM objects owned by this device. */
139 static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
141 struct etnaviv_drm_private *priv = dev->dev_private;
143 etnaviv_gem_describe_objects(priv, m);
/*
 * debugfs: print the DRM VMA offset manager's address-space allocator
 * state.  vm_lock is taken for read only, which is sufficient for
 * printing.
 */
148 static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
150 struct drm_printer p = drm_seq_file_printer(m);
152 read_lock(&dev->vma_offset_manager->vm_lock);
153 drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
154 read_unlock(&dev->vma_offset_manager->vm_lock);
/*
 * debugfs (per GPU): print the GPU MMU's drm_mm allocator state under
 * the MMU mutex.
 */
159 static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
161 struct drm_printer p = drm_seq_file_printer(m);
163 seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
165 mutex_lock(&gpu->mmu->lock);
166 drm_mm_print(&gpu->mmu->mm, &p);
167 mutex_unlock(&gpu->mmu->lock);
/*
 * Hex-dump the GPU command buffer into the seq_file: a header with the
 * buffer's virtual address, physical address and remaining free space
 * (size - user_size), followed by the buffer's 32-bit words.  The
 * line-breaking logic inside the loop is not visible in this excerpt.
 * Callers are expected to hold gpu->lock — see etnaviv_ring_show().
 */
172 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
174 struct etnaviv_cmdbuf *buf = gpu->buffer;
175 u32 size = buf->size;
176 u32 *ptr = buf->vaddr;
179 seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
180 buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
181 size - buf->user_size);
/* Iterate one u32 word at a time over the whole buffer. */
183 for (i = 0; i < size / 4; i++) {
187 seq_printf(m, "\t0x%p: ", ptr + i);
188 seq_printf(m, "%08x ", *(ptr + i));
/* debugfs (per GPU): dump the ring buffer while holding gpu->lock so
 * the buffer contents are stable against concurrent submission. */
193 static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
195 seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));
197 mutex_lock(&gpu->lock);
198 etnaviv_buffer_dump(gpu, m);
199 mutex_unlock(&gpu->lock);
/*
 * Generic debugfs dispatcher: recover the per-entry callback stored in
 * drm_info_list.data and invoke it with the drm_device.  The actual
 * invocation/return line is outside this excerpt.
 */
204 static int show_unlocked(struct seq_file *m, void *arg)
206 struct drm_info_node *node = (struct drm_info_node *) m->private;
207 struct drm_device *dev = node->minor->dev;
208 int (*show)(struct drm_device *dev, struct seq_file *m) =
209 node->info_ent->data;
/*
 * Like show_unlocked(), but the stored callback takes an etnaviv_gpu:
 * iterate all ETNA_MAX_PIPES slots and call it for each GPU.  The
 * per-GPU invocation and NULL-slot skip inside the loop are not
 * visible in this excerpt.
 */
214 static int show_each_gpu(struct seq_file *m, void *arg)
216 struct drm_info_node *node = (struct drm_info_node *) m->private;
217 struct drm_device *dev = node->minor->dev;
218 struct etnaviv_drm_private *priv = dev->dev_private;
219 struct etnaviv_gpu *gpu;
220 int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
221 node->info_ent->data;
225 for (i = 0; i < ETNA_MAX_PIPES; i++) {
/*
 * debugfs entry table.  Device-wide entries dispatch through
 * show_unlocked(), per-GPU entries through show_each_gpu(); the .data
 * field smuggles the typed callback through drm_info_list.
 * NOTE(review): the "mm" initialiser uses brace-internal spaces unlike
 * its siblings — purely cosmetic inconsistency.
 */
238 static struct drm_info_list etnaviv_debugfs_list[] = {
239 {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
240 {"gem", show_unlocked, 0, etnaviv_gem_show},
241 { "mm", show_unlocked, 0, etnaviv_mm_show },
242 {"mmu", show_each_gpu, 0, etnaviv_mmu_show},
243 {"ring", show_each_gpu, 0, etnaviv_ring_show},
/*
 * DRM ->debugfs_init() hook: register etnaviv_debugfs_list under the
 * minor's debugfs root, logging an error on failure.
 */
246 static int etnaviv_debugfs_init(struct drm_minor *minor)
248 struct drm_device *dev = minor->dev;
251 ret = drm_debugfs_create_files(etnaviv_debugfs_list,
252 ARRAY_SIZE(etnaviv_debugfs_list),
253 minor->debugfs_root, minor);
256 dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
/*
 * ioctl: query a GPU parameter.  Validates the pipe index against
 * ETNA_MAX_PIPES, resolves the GPU (a NULL-gpu check between lookup
 * and use is presumably in the elided lines — verify) and forwards to
 * etnaviv_gpu_get_param(), which fills args->value.
 */
268 static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
269 struct drm_file *file)
271 struct etnaviv_drm_private *priv = dev->dev_private;
272 struct drm_etnaviv_param *args = data;
273 struct etnaviv_gpu *gpu;
275 if (args->pipe >= ETNA_MAX_PIPES)
278 gpu = priv->gpu[args->pipe];
282 return etnaviv_gpu_get_param(gpu, args->param, &args->value);
/*
 * ioctl: allocate a new GEM buffer object.  Rejects flags outside the
 * allowed caching-mode mask (the mask continues on a line not visible
 * here), then delegates to etnaviv_gem_new_handle() which returns the
 * new handle through args->handle.
 */
285 static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
286 struct drm_file *file)
288 struct drm_etnaviv_gem_new *args = data;
290 if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
294 return etnaviv_gem_new_handle(dev, file, args->size,
295 args->flags, &args->handle);
/*
 * Convert a uapi timespec-like value (anything with tv_sec/tv_nsec
 * fields) into a kernel struct timespec compound literal, copying the
 * two fields explicitly.
 */
298 #define TS(t) ((struct timespec){ \
299 .tv_sec = (t).tv_sec, \
300 .tv_nsec = (t).tv_nsec \
/*
 * ioctl: prepare a BO for CPU access.  Validates the op mask
 * (read/write/nosync), looks up the GEM object by handle, synchronises
 * via etnaviv_gem_cpu_prep() with the user-supplied timeout, then
 * drops the lookup reference.  Error returns for the invalid-op and
 * lookup-failure cases are outside this excerpt.
 */
303 static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
304 struct drm_file *file)
306 struct drm_etnaviv_gem_cpu_prep *args = data;
307 struct drm_gem_object *obj;
310 if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
313 obj = drm_gem_object_lookup(file, args->handle);
/* TS() yields a compound literal; its address is passed as the timeout. */
317 ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
319 drm_gem_object_put_unlocked(obj);
/*
 * ioctl: end a CPU access section started by GEM_CPU_PREP.  Looks up
 * the object by handle, calls etnaviv_gem_cpu_fini(), and drops the
 * lookup reference.  (A flags-validation check appears to sit in the
 * elided lines around 330 — not visible here.)
 */
324 static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
325 struct drm_file *file)
327 struct drm_etnaviv_gem_cpu_fini *args = data;
328 struct drm_gem_object *obj;
334 obj = drm_gem_object_lookup(file, args->handle);
338 ret = etnaviv_gem_cpu_fini(obj);
340 drm_gem_object_put_unlocked(obj);
/*
 * ioctl: return the fake mmap offset of a BO so userspace can mmap it
 * through the DRM device node.  Looks up the object, fills
 * args->offset via etnaviv_gem_mmap_offset(), drops the reference.
 */
345 static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
346 struct drm_file *file)
348 struct drm_etnaviv_gem_info *args = data;
349 struct drm_gem_object *obj;
355 obj = drm_gem_object_lookup(file, args->handle);
359 ret = etnaviv_gem_mmap_offset(obj, &args->offset);
360 drm_gem_object_put_unlocked(obj);
/*
 * ioctl: wait for a GPU fence to signal.  Validates the flag mask
 * (only ETNA_WAIT_NONBLOCK allowed) and the pipe index, resolves the
 * GPU, takes a non-blocking path when ETNA_WAIT_NONBLOCK is set (that
 * branch's body is not visible here), otherwise blocks interruptibly
 * with the user-supplied timeout.
 */
365 static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
366 struct drm_file *file)
368 struct drm_etnaviv_wait_fence *args = data;
369 struct etnaviv_drm_private *priv = dev->dev_private;
/* Address of a TS() compound literal — valid for the whole function scope. */
370 struct timespec *timeout = &TS(args->timeout);
371 struct etnaviv_gpu *gpu;
373 if (args->flags & ~(ETNA_WAIT_NONBLOCK))
376 if (args->pipe >= ETNA_MAX_PIPES)
379 gpu = priv->gpu[args->pipe];
383 if (args->flags & ETNA_WAIT_NONBLOCK)
386 return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
/*
 * ioctl: wrap anonymous user memory in a GEM object.  Validates the
 * flag mask, page alignment of pointer and size, and that both values
 * survive narrowing to the kernel's native pointer/size types; then
 * performs the legacy three-argument access_ok() check with
 * VERIFY_READ or VERIFY_WRITE depending on the requested direction,
 * and finally creates the object via etnaviv_gem_new_userptr().
 */
390 static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
391 struct drm_file *file)
393 struct drm_etnaviv_gem_userptr *args = data;
396 if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
/* Reject sub-page-aligned pointer or size, and values that would be
 * truncated on this architecture.  NOTE(review): the final
 * "user_ptr & ~PAGE_MASK" test looks redundant with
 * offset_in_page(user_ptr | user_size) above — confirm intent. */
400 if (offset_in_page(args->user_ptr | args->user_size) ||
401 (uintptr_t)args->user_ptr != args->user_ptr ||
402 (u32)args->user_size != args->user_size ||
403 args->user_ptr & ~PAGE_MASK)
406 if (args->flags & ETNA_USERPTR_WRITE)
407 access = VERIFY_WRITE;
409 access = VERIFY_READ;
411 if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
415 return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
416 args->user_size, args->flags,
/*
 * ioctl: wait until all GPU work involving a specific BO completes.
 * Validates flags (only ETNA_WAIT_NONBLOCK) and the pipe index,
 * resolves GPU and object, honours ETNA_WAIT_NONBLOCK (branch body
 * not visible here), then waits via etnaviv_gem_wait_bo() and drops
 * the object reference.
 */
420 static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
421 struct drm_file *file)
423 struct etnaviv_drm_private *priv = dev->dev_private;
424 struct drm_etnaviv_gem_wait *args = data;
425 struct timespec *timeout = &TS(args->timeout);
426 struct drm_gem_object *obj;
427 struct etnaviv_gpu *gpu;
430 if (args->flags & ~(ETNA_WAIT_NONBLOCK))
433 if (args->pipe >= ETNA_MAX_PIPES)
436 gpu = priv->gpu[args->pipe];
440 obj = drm_gem_object_lookup(file, args->handle);
444 if (args->flags & ETNA_WAIT_NONBLOCK)
447 ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
449 drm_gem_object_put_unlocked(obj);
/*
 * ioctl dispatch table.  ETNA_IOCTL pastes the uapi number
 * DRM_ETNAVIV_<n> together with the etnaviv_ioctl_<func> handler.
 * Every entry requires authentication (DRM_AUTH) and is permitted on
 * render nodes (DRM_RENDER_ALLOW).
 */
454 static const struct drm_ioctl_desc etnaviv_ioctls[] = {
455 #define ETNA_IOCTL(n, func, flags) \
456 DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
457 ETNA_IOCTL(GET_PARAM, get_param, DRM_AUTH|DRM_RENDER_ALLOW),
458 ETNA_IOCTL(GEM_NEW, gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
459 ETNA_IOCTL(GEM_INFO, gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
460 ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
461 ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
462 ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
463 ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
464 ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
465 ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
/* mmap vm_ops: etnaviv-specific fault handler plus the stock DRM GEM
 * open/close refcounting callbacks. */
468 static const struct vm_operations_struct vm_ops = {
469 .fault = etnaviv_gem_fault,
470 .open = drm_gem_vm_open,
471 .close = drm_gem_vm_close,
/* Character-device file operations: standard DRM plumbing, with an
 * etnaviv-specific mmap handler (further entries elided from view). */
474 static const struct file_operations fops = {
475 .owner = THIS_MODULE,
477 .release = drm_release,
478 .unlocked_ioctl = drm_ioctl,
479 .compat_ioctl = drm_compat_ioctl,
483 .mmap = etnaviv_gem_mmap,
/*
 * Main DRM driver description: a GEM driver (feature flags continue on
 * an elided line) with full PRIME import/export support, per-file
 * open/postclose hooks, debugfs registration when CONFIG_DEBUG_FS is
 * set, and the ioctl table above.  Name/date/version identification
 * fields are mostly outside this excerpt.
 */
486 static struct drm_driver etnaviv_drm_driver = {
487 .driver_features = DRIVER_GEM |
490 .open = etnaviv_open,
491 .postclose = etnaviv_postclose,
492 .gem_free_object_unlocked = etnaviv_gem_free_object,
493 .gem_vm_ops = &vm_ops,
/* PRIME buffer sharing: generic DRM helpers for fd<->handle, with
 * etnaviv-specific pin/unpin, sg-table and vmap callbacks. */
494 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
495 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
496 .gem_prime_export = drm_gem_prime_export,
497 .gem_prime_import = drm_gem_prime_import,
498 .gem_prime_res_obj = etnaviv_gem_prime_res_obj,
499 .gem_prime_pin = etnaviv_gem_prime_pin,
500 .gem_prime_unpin = etnaviv_gem_prime_unpin,
501 .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
502 .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
503 .gem_prime_vmap = etnaviv_gem_prime_vmap,
504 .gem_prime_vunmap = etnaviv_gem_prime_vunmap,
505 .gem_prime_mmap = etnaviv_gem_prime_mmap,
506 #ifdef CONFIG_DEBUG_FS
507 .debugfs_init = etnaviv_debugfs_init,
509 .ioctls = etnaviv_ioctls,
510 .num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
513 .desc = "etnaviv DRM",
/*
 * Component-master ->bind(): allocate the drm_device and the driver
 * private data, create an ordered (single-threaded) workqueue,
 * initialise the GEM object list and its lock, stash the drm_device
 * as drvdata, bind all GPU component devices, and register the DRM
 * device.  The trailing lines visible here belong to the error-unwind
 * path (labels and returns are elided).
 */
522 static int etnaviv_bind(struct device *dev)
524 struct etnaviv_drm_private *priv;
525 struct drm_device *drm;
528 drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
532 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
534 dev_err(dev, "failed to allocate private data\n");
538 drm->dev_private = priv;
/* Ordered workqueue: jobs queued here execute one at a time, in order. */
540 priv->wq = alloc_ordered_workqueue("etnaviv", 0);
546 mutex_init(&priv->gem_lock);
547 INIT_LIST_HEAD(&priv->gem_list);
550 dev_set_drvdata(dev, drm);
552 ret = component_bind_all(dev, drm);
558 ret = drm_dev_register(drm, 0);
/* --- error unwind: reverse order of the setup above --- */
565 component_unbind_all(dev, drm);
567 flush_workqueue(priv->wq);
568 destroy_workqueue(priv->wq);
/*
 * Component-master ->unbind(): tear down in reverse order of bind —
 * unregister the DRM device, drain and destroy the workqueue, unbind
 * all GPU components, then detach the private data.
 */
577 static void etnaviv_unbind(struct device *dev)
579 struct drm_device *drm = dev_get_drvdata(dev);
580 struct etnaviv_drm_private *priv = drm->dev_private;
582 drm_dev_unregister(drm);
584 flush_workqueue(priv->wq);
585 destroy_workqueue(priv->wq);
587 component_unbind_all(dev, drm);
589 drm->dev_private = NULL;
/* Component-framework master callbacks: called once all matched GPU
 * core devices are available / being removed. */
595 static const struct component_master_ops etnaviv_master_ops = {
596 .bind = etnaviv_bind,
597 .unbind = etnaviv_unbind,
/* Component-match callback: match a candidate device by its OF node
 * (device-tree based binding). */
600 static int compare_of(struct device *dev, void *data)
602 struct device_node *np = data;
604 return dev->of_node == np;
/* Component-match callback: match a candidate device by name
 * (platform-data based binding on non-DT systems). */
607 static int compare_str(struct device *dev, void *data)
609 return !strcmp(dev_name(dev), data);
/*
 * Platform probe for the GPU subsystem node: set a 32-bit coherent DMA
 * mask, build the component match list — from the "cores" OF phandle
 * list when a DT node exists, otherwise from a NULL-terminated name
 * array passed via platform_data — and register as component master.
 * NOTE(review): the return value of dma_set_coherent_mask() is
 * ignored here — confirm that is intentional.
 */
612 static int etnaviv_pdev_probe(struct platform_device *pdev)
614 struct device *dev = &pdev->dev;
615 struct device_node *node = dev->of_node;
616 struct component_match *match = NULL;
618 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
621 struct device_node *core_node;
/* One match entry per "cores" phandle; of_node_put() balances the
 * reference taken by of_parse_phandle(). */
625 core_node = of_parse_phandle(node, "cores", i);
629 drm_of_component_match_add(&pdev->dev, &match,
630 compare_of, core_node);
631 of_node_put(core_node);
633 } else if (dev->platform_data) {
634 char **names = dev->platform_data;
637 for (i = 0; names[i]; i++)
638 component_match_add(dev, &match, compare_str, names[i]);
641 return component_master_add_with_match(dev, &etnaviv_master_ops, match);
/* Platform remove: deregister the component master (triggers unbind). */
644 static int etnaviv_pdev_remove(struct platform_device *pdev)
646 component_master_del(&pdev->dev, &etnaviv_master_ops);
/* Device-tree match table for the supported GPU subsystem containers;
 * exported for module autoloading via MODULE_DEVICE_TABLE. */
651 static const struct of_device_id dt_match[] = {
652 { .compatible = "fsl,imx-gpu-subsystem" },
653 { .compatible = "marvell,dove-gpu-subsystem" },
656 MODULE_DEVICE_TABLE(of, dt_match);
/* Platform driver for the subsystem ("master") device; the per-core
 * etnaviv_gpu_driver is registered separately in etnaviv_init(). */
658 static struct platform_driver etnaviv_platform_driver = {
659 .probe = etnaviv_pdev_probe,
660 .remove = etnaviv_pdev_remove,
663 .of_match_table = dt_match,
/*
 * Module init: seed the command-stream validator tables, then register
 * the per-GPU-core platform driver followed by the subsystem master
 * driver; if the latter fails, the former is unregistered before
 * returning the error.
 */
667 static int __init etnaviv_init(void)
671 etnaviv_validate_init();
673 ret = platform_driver_register(&etnaviv_gpu_driver);
677 ret = platform_driver_register(&etnaviv_platform_driver);
679 platform_driver_unregister(&etnaviv_gpu_driver);
683 module_init(etnaviv_init);
/*
 * Module exit: unregister both platform drivers.  NOTE(review):
 * teardown conventionally reverses init order, but here the GPU driver
 * is unregistered before the master driver, same order as init —
 * confirm this ordering is intended.
 */
685 static void __exit etnaviv_exit(void)
687 platform_driver_unregister(&etnaviv_gpu_driver);
688 platform_driver_unregister(&etnaviv_platform_driver);
690 module_exit(etnaviv_exit);
/* Module metadata: authors, description, GPLv2 license, and a
 * platform alias so the module autoloads for the etnaviv device. */
692 MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
693 MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
694 MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
695 MODULE_DESCRIPTION("etnaviv DRM Driver");
696 MODULE_LICENSE("GPL v2");
697 MODULE_ALIAS("platform:etnaviv");