/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

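/* Translate a VRAM-carveout backed object's drm_mm node into a physical
 * (DMA) address.  Only meaningful when the object has a vram_node, i.e.
 * when it is not backed by shmem pages.
 */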
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API.  Really the GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */
static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_map_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			dev_err(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

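/* Pin the backing pages under struct_mutex.  The matching
 * msm_gem_put_pages() is currently a no-op, since pin-counting
 * is not implemented yet (see the note in that function).
 */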
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

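/* Set up vma flags and page protection for a userspace mapping:
 * WC and uncached objects get pgprot_writecombine()/pgprot_noncached(),
 * while cached objects are redirected to the shmem file so they get
 * their own address_space.
 */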
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

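/* Fault handler: pin the backing pages (allocating them on first fault)
 * and insert the single faulting page into the userspace mapping as a
 * mixed-map pfn.
 */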
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* This should only happen if userspace tries to pass a mmap'd
	 * but unfaulted gem bo vaddr into submit ioctl, triggering
	 * a page fault while struct_mutex is already held.  This is
	 * not a valid use-case so just bail.
	 */
	if (priv->struct_mutex_task == current)
		return VM_FAULT_SIGBUS;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

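/* Drop all per-MMU iova mappings for this object.  Called with
 * struct_mutex held, e.g. from the purge and final free paths.
 */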
static void
put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
			msm_obj->domain[id].iova = 0;
		}
	}
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

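/* Dumb-buffer support: allocate a scanout-capable, write-combined
 * buffer sized from the aligned pitch and height, and return a
 * userspace handle for it.
 */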
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

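/* Kernel virtual address helpers: the object is vmap()'d write-combined
 * on first use, and vmap_count tracks outstanding get/put pairs so that
 * msm_gem_vunmap() can tell when it is safe to unmap.
 */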
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}

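/* Release a purgeable object's backing storage: drop its iova mappings,
 * kernel vmap and pages, and truncate the shmem backing so the memory
 * goes back to the system immediately.
 */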
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
				reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

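/* Attach the submit fence to the object's reservation object (exclusive
 * or shared) and move the object onto the GPU's active list.
 */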
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

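/* Wait (unless MSM_PREP_NOSYNC) for any fences in the object's
 * reservation object before CPU access; returns -EBUSY for a NOSYNC
 * check on a busy object, or -ETIMEDOUT if the wait times out.
 */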
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

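/* debugfs support: dump per-object state and any attached fences */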
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (!fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

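/* Final teardown when the last reference is dropped: called with
 * struct_mutex held; unmaps iovas and releases pages (or, for imported
 * dma-bufs, just the page array and sgt we allocated for them).
 */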
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

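/* Common allocation path: validate cache flags, decide between shmem
 * and the VRAM carveout for backing store, allocate the msm_gem_object
 * (with a trailing drm_mm_node when VRAM is used), set up the
 * reservation object, and add the object to the inactive list.
 */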
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}

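/* Import a dma-buf: wrap the caller-provided sg_table in a GEM object.
 * Requires an IOMMU, since without one buffers must come from the
 * contiguous VRAM carveout.
 */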
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	/* Take mutex so we can modify the inactive list in msm_gem_new_impl */
	mutex_lock(&dev->struct_mutex);
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}