/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/export.h>
#include <asm/shmparam.h>

#include "drm_legacy.h"

#include <linux/nospec.h>
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif
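
	/* On 64-bit kernels, a token with any of the upper 32 bits set cannot
	 * be exposed verbatim through the 32-bit mmap-offset ABI described
	 * above, so such maps are forced onto the hashed-handle path below. */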
	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap(). Otherwise we run into cache aliasing problems
		 * on some platforms. On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
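
/*
 * Illustrative example (hypothetical values, not from this file): with
 * PAGE_SHIFT == 12 on a 64-bit kernel, an SHM map whose kernel address is
 * 0xffff880012345000 has upper bits set, so it receives a hashed handle;
 * the user_token eventually handed back to userspace is hash.key shifted
 * left by PAGE_SHIFT, and is what a client later passes to mmap() as the
 * file offset.
 */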
/*
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;
	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no
	 * security hole created by that, and it works around various broken
	 * drivers that use a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it. Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}
		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable. So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first.
		 */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}
	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
}
int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);
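
/*
 * Example (hypothetical driver usage, not part of this file): a legacy PCI
 * driver would typically publish its register BAR at load time with
 *
 *	struct drm_local_map *regs;
 *	int ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *				    pci_resource_len(pdev, 0),
 *				    _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *
 * where pdev is the device's struct pci_dev; the BAR index and the flags
 * are driver-specific.
 */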
/*
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);
	if (err)
		return err;

	/* Avoid a warning on 64-bit: the cast isn't pretty, but the
	 * user-space ABI was fixed long ago and cannot change. */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it. Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 * it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}
/*
 * Remove a map private from the list and deallocate resources if the
 * mapping isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * and frees any associated resources (such as MTRRs) if it is no longer
 * in use.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;
	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_legacy_pci_free(dev, &dmah);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);
int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_rmmap);
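
/*
 * Callers that already hold dev->struct_mutex must use
 * drm_legacy_rmmap_locked() directly; this wrapper only adds the locking.
 */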
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly. Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about. This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty
	 * and we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/*
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}
#if IS_ENABLED(CONFIG_AGP)
/*
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;
	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
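
	/* Illustrative sizing example (hypothetical values, not from the
	 * original source): with PAGE_SHIFT == 12 and request->size == 65536,
	 * order_base_2() yields order = 16, so size = 65536, page_order = 4
	 * and total = PAGE_SIZE << 4 = 65536; with _DRM_PAGE_ALIGN requested,
	 * alignment = PAGE_ALIGN(65536) = 65536 as well. */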
	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);
	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);
	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;
	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */
int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
				sizeof(*dma->pagelist), GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
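	/* From this point on, any failure must free temp_pagelist and leave
	 * the original dma->pagelist untouched; the swap to the new list
	 * happens only once every allocation has succeeded (see below). */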
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
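		/* Each pass allocates one contiguous DMA segment of
		 * PAGE_SIZE << page_order bytes, records its pages in
		 * temp_pagelist, and carves it into alignment-sized
		 * buffers in the inner loop below. */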
		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}
	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);
static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
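
	/* The sizing arithmetic here is identical to drm_legacy_addbufs_agp()
	 * above; only the backing store (the driver's scatter/gather area)
	 * differs. */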
	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}
	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/*
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}
/*
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);
	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
					&request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &from->low_mark,
						 sizeof(from->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &from->high_mark,
						 sizeof(from->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}
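
/*
 * Usage note: userspace (e.g. libdrm's drmGetBufInfo()) typically issues this
 * ioctl twice -- first with request->count == 0 to learn how many entries
 * exist, then again with a list buffer large enough to receive them.
 */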
/*
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water marks.
 *
 * \note This ioctl is deprecated and almost never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}
/*
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
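		/* Clamp idx under speculative execution before it is used to
		 * index dma->buflist (Spectre-v1 hardening). */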
		idx = array_index_nospec(idx, dma->buf_count);
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_legacy_free_buffer(dev, buf);
	}

	return 0;
}
/*
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);
	if (request->count >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
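		/* vm_mmap() returns a userspace address on success or a
		 * negative errno folded into an unsigned long; values in the
		 * top 1024 of the address space are treated as errors. */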
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
 done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EOPNOTSUPP;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}

struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);