/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
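
/*
 * Physical address of a VRAM-carveout backed object: the object's node in
 * the vram allocator gives a page offset into the carveout, relative to
 * priv->vram.paddr.
 */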
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API.  Really the GPU cache is out of scope here (handled on the cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_map_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

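/*
 * Lazily allocate the backing pages for an object on first use: shmem pages
 * via drm_gem_get_pages() in the normal case, or pages from the VRAM
 * carveout when there is no IOMMU.  Also builds the sg_table and performs
 * the initial cache clean for WC/uncached buffers.
 */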
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			dev_err(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);

	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

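/*
 * Set up the vma flags and page protection for a userspace mmap of the
 * object, according to its caching mode (WC, uncached, or cached via the
 * shmem file's own address_space).
 */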
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);

		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

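/*
 * Page fault handler for userspace mmaps: pin the backing pages if needed
 * and insert the faulting page's pfn into the vma as a mixed mapping.
 */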
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj.  So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);

	return offset;
}

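/*
 * An object can be mapped into multiple address spaces (one per GPU/display
 * MMU context); the helpers below manage the per-aspace msm_gem_vma entries
 * on the object's vmas list.
 */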
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

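/*
 * Dumb-buffer support: create a linear, write-combined buffer suitable for
 * scanout and hand back a handle and mmap offset for it.
 */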
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

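/*
 * Kernel virtual mapping of an object: vmap() the backing pages on first
 * use and keep a vmap_count so the shrinker knows when the mapping can be
 * torn down again.
 */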
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * ring.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false (i.e. purged):
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

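/*
 * Called from the shrinker to reclaim a purgeable (MSM_MADV_DONTNEED)
 * object: drop its iova mappings, kernel mapping and backing pages, and
 * truncate the shmem file so the memory is returned to the system.
 */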
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

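/*
 * CPU access synchronization: wait (optionally with a timeout) for pending
 * fences on the object's reservation before userspace reads or writes it.
 */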
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

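/*
 * Common part of object creation: validate the cache flags, allocate and
 * initialize the msm_gem_object, and put it on the inactive list (taking
 * struct_mutex if the caller doesn't already hold it).
 */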
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

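/*
 * Allocate a GEM object, backed either by shmem (the normal case) or by the
 * VRAM carveout when no IOMMU is present or MSM_BO_STOLEN is requested.
 */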
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

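/*
 * Import a dma-buf: wrap the caller-provided sg_table in a private GEM
 * object and build the page array from it.  Requires an IOMMU; without one
 * we cannot map arbitrary imported pages for the GPU.
 */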
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

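/*
 * Convenience helper for kernel-internal buffers (e.g. ringbuffers):
 * allocate an object, pin it into the given address space, and return its
 * kernel vaddr, optionally handing back the GEM object and iova as well.
 */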
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret) {
			drm_gem_object_put(obj);
			return ERR_PTR(ret);
		}
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		drm_gem_object_put(obj);
		return ERR_CAST(vaddr);
	}

	if (bo)
		*bo = obj;

	return vaddr;
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}