/*
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"

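/*
 * One entry per live mapping of a legacy map: tracks which process has a
 * given map mmap()ed, threaded onto drm_device::vmalist.
 */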
struct drm_vma_entry {
        struct list_head head;
        struct vm_area_struct *vma;
        pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

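/*
 * Compute the page protection for an I/O mapping: never encrypted, and
 * write-combined or uncached depending on the map flags and the quirks of
 * the architecture at hand.
 */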
static pgprot_t drm_io_prot(struct drm_local_map *map,
                            struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

        /* We don't want graphics memory to be mapped encrypted */
        tmp = pgprot_decrypted(tmp);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(__mips__)
        if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
                tmp = pgprot_noncached(tmp);
        else
                tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end -
                                    vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

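/*
 * Compute the page protection for a DMA mapping; a non-cached
 * write-combining protection is only needed on PowerPC parts with
 * non-coherent caches.
 */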
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
        tmp = pgprot_noncached_wc(tmp);
#endif
        return tmp;
}

/*
 * \c fault method for AGP virtual memory.
 *
 * \param vmf fault descriptor.
 * \return 0 on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;

        /*
         * Find the right map
         */
        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_fault_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_fault_error;

        r_list = drm_hash_entry(hash, struct drm_map_list, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                /*
                 * Using vm_pgoff as a selector forces us to use this unusual
                 * addressing scheme.
                 */
                resource_size_t offset = vmf->address - vma->vm_start;
                resource_size_t baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = agpmem->memory->pages[offset];
                get_page(page);
                vmf->page = page;

                DRM_DEBUG
                    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
                     (unsigned long long)baddr,
                     agpmem->memory->pages[offset],
                     (unsigned long long)offset,
                     page_count(page));
                return 0;
        }
vm_fault_error:
        return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
#endif

/*
 * \c fault method for shared virtual memory.
 *
 * \param vmf fault descriptor.
 * \return 0 on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_local_map *map = vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (!map)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = vmf->address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;

        DRM_DEBUG("shm_fault 0x%lx\n", offset);
        return 0;
}

/*
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_local_map *map;
        struct drm_map_list *r_list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                }
        }

        /* We were the only map that was found */
        if (found_maps == 1 && (map->flags & _DRM_REMOVABLE)) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each_entry(r_list, &dev->maplist, head) {
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                arch_phys_wc_del(map->mtrr);
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dma_free_coherent(&dev->pdev->dev,
                                                  map->size,
                                                  map->handle,
                                                  map->offset);
                                break;
                        }
                        kfree(map);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/*
 * \c fault method for DMA virtual memory.
 *
 * \param vmf fault descriptor.
 * \return 0 on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return VM_FAULT_SIGBUS; /* Error */
        if (!dma->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = vmf->address - vma->vm_start;
                                        /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
        page = virt_to_page((void *)dma->pagelist[page_nr]);

        get_page(page);
        vmf->page = page;

        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
        return 0;
}

/*
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vmf fault descriptor.
 * \return 0 on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_local_map *map = vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return VM_FAULT_SIGBUS; /* Error */
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = vmf->address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);
        vmf->page = page;

        return 0;
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
        .fault = drm_vm_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
        .fault = drm_vm_shm_fault,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
        .fault = drm_vm_dma_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
        .fault = drm_vm_sg_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

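/*
 * Record a new mapping in dev->vmalist. The caller must hold
 * dev->struct_mutex; drm_vm_open() below takes it for the VM callback path.
 */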
static void drm_vm_open_locked(struct drm_device *dev,
                               struct vm_area_struct *vma)
{
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->pid = current->pid;
                list_add(&vma_entry->head, &dev->vmalist);
        }
}

static void drm_vm_open(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(dev, vma);
        mutex_unlock(&dev->struct_mutex);
}

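/*
 * Drop the dev->vmalist entry for @vma. As with drm_vm_open_locked(), the
 * caller must already hold dev->struct_mutex.
 */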
static void drm_vm_close_locked(struct drm_device *dev,
                                struct vm_area_struct *vma)
{
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                        break;
                }
        }
}

/*
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search for the \p vma entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(dev, vma);
        mutex_unlock(&dev->struct_mutex);
}

/*
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and
 * registers the mapping with drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev;
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->minor->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        drm_vm_open_locked(dev, vma);
        return 0;
}

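/*
 * Base offset to add to a map offset before remapping. Only Alpha needs a
 * non-zero correction, to translate into its dense (bus) memory space.
 */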
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base;
#else
        return 0;
#endif
}

/*
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so drm_mmap_dma() is called. Otherwise the map is looked up in
 * drm_device::maplist, the restricted flag is checked, the virtual memory
 * operations are set according to the mapping type, and the pages are
 * remapped. Finally the mapping is registered with drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        resource_size_t offset = 0;
        struct drm_hash_item *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
#if !defined(__arm__)
        case _DRM_AGP:
                if (dev->agp && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms we can't talk to bus dma address from the CPU, so for
                         * memory of type DRM_AGP, we'll deal with sorting out the real physical
                         * pages and mappings in fault()
                         */
#if defined(__powerpc__)
                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                fallthrough;    /* to _DRM_FRAME_BUFFER... */
#endif
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = drm_core_get_reg_ofs(dev);
                vma->vm_page_prot = drm_io_prot(map, vma);
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%llx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid fault */
                if (remap_pfn_range(vma, vma->vm_start,
                    page_to_pfn(virt_to_page(map->handle)),
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                fallthrough;    /* to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        drm_vm_open_locked(dev, vma);
        return 0;
}

int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        int ret;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);
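
/*
 * Legacy (non-GEM) drivers typically hook this up as the ->mmap handler in
 * their file_operations. A minimal sketch (the fops name is illustrative,
 * not from this file):
 *
 *      static const struct file_operations foo_driver_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = drm_open,
 *              .release        = drm_release,
 *              .unlocked_ioctl = drm_ioctl,
 *              .mmap           = drm_legacy_mmap,
 *              .poll           = drm_poll,
 *              .llseek         = noop_llseek,
 *      };
 */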

#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev)
{
        struct drm_vma_entry *vma, *vma_temp;

        /* Clear vma list (only needed for legacy drivers) */
        list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
                list_del(&vma->head);
                kfree(vma);
        }
}
#endif