/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API.  Really the GPU cache is out of scope here (handled on the cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping them to the CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss) or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_map_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			dev_err(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj.  So, we don't need to hold one here.
	 */
	ret = mutex_lock_interruptible(&msm_obj->lock);
	if (ret)
		goto out;

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);

	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}
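
/*
 * Example (illustrative only, not part of the driver): a minimal sketch of
 * the expected msm_gem_get_iova()/msm_gem_put_iova() pairing.  The 'obj' and
 * 'aspace' below are assumed to come from the caller.
 *
 *	uint64_t iova;
 *	int ret;
 *
 *	ret = msm_gem_get_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *
 *	// ... hand 'iova' to the GPU or display hardware ...
 *
 *	msm_gem_put_iova(obj, aspace);
 */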

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
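
/*
 * Example (illustrative only): the offset returned above is the "fake" mmap
 * offset that userspace passes to mmap() on the DRM fd.  'drm_fd', 'size'
 * and 'offset' are assumptions of this sketch.
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, offset);
 *	if (ptr == MAP_FAILED)
 *		return -errno;
 */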

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
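
/*
 * Example (illustrative only): get_vaddr/put_vaddr are expected to be
 * balanced so vmap_count can drop back to zero and the shrinker is allowed
 * to vunmap the object again.  'obj', 'data' and 'len' are assumptions of
 * this sketch.
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);		// CPU access while mapped
 *	msm_gem_put_vaddr(obj);
 */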

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
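
/*
 * Example (illustrative only): how a caller can interpret the return value,
 * e.g. to report back whether the backing pages were retained.  'obj' and
 * 'req_madv' are assumptions of this sketch.
 *
 *	ret = msm_gem_madvise(obj, req_madv);
 *	if (ret >= 0) {
 *		retained = ret;		// true if pages are still present
 *		ret = 0;
 *	}
 */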

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
				reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
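
/*
 * Example (illustrative only): bracketing CPU access with
 * msm_gem_cpu_prep()/msm_gem_cpu_fini() so pending GPU fences are waited on
 * first.  'obj' and 'timeout' are assumptions of this sketch.
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	// ... CPU reads/writes of the buffer ...
 *	msm_gem_cpu_fini(obj);
 */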

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret) {
			drm_gem_object_unreference(obj);
			return ERR_PTR(ret);
		}
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		drm_gem_object_unreference(obj);
		return ERR_CAST(vaddr);
	}

	if (bo)
		*bo = obj;

	return vaddr;
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}
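
/*
 * Example (illustrative only): a typical msm_gem_kernel_new() caller gets a
 * kernel-mapped, GPU-visible buffer in one call.  The 'gpu->aspace' address
 * space and the surrounding error handling are assumptions of this sketch.
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr;
 *
 *	vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC, gpu->aspace,
 *			&bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	// ... fill vaddr, hand iova to the GPU ...
 *	msm_gem_put_vaddr(bo);
 */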