/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_device *dev;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};
#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct list_head linear;
	bool has_linear;
};
struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct interval_tree_node it;
	struct list_head link;
	struct drm_i915_gem_object *obj;
	struct work_struct work;
	bool active;
	bool is_linear;
};
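/*
 * Overview of the bookkeeping above: each process address space (mm_struct)
 * that owns userptr objects gets one i915_mm_struct, which lazily creates a
 * single i915_mmu_notifier registered with the core MM. Every userptr GEM
 * object is tracked by an i915_mmu_object; all of them sit on the notifier's
 * linear list, and non-overlapping ranges are additionally indexed in the
 * interval tree for fast range invalidation. Once any ranges overlap,
 * has_linear forces invalidation to fall back to walking the list.
 */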
static void __cancel_userptr__worker(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		struct drm_i915_private *dev_priv = to_i915(dev);
		struct i915_vma *vma, *tmp;
		bool was_interruptible;

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
			int ret = i915_vma_unbind(vma);
			WARN_ON(ret && ret != -EIO);
		}

		WARN_ON(i915_gem_object_put_pages(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
}
static unsigned long cancel_userptr(struct i915_mmu_object *mo)
{
	unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size;

	/* The mmu_object is released late when destroying the
	 * GEM object so it is entirely possible to gain a
	 * reference on an object in the process of being freed
	 * since our serialisation is via the spinlock and not
	 * the struct_mutex - and consequently use it after it
	 * is freed and then double free it.
	 */
	if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) {
		schedule_work(&mo->work);
		/* only schedule one work packet to avoid the refleak */
		mo->active = false;
	}

	return end;
}
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	if (mn->has_linear) {
		list_for_each_entry(mo, &mn->linear, link) {
			if (mo->it.last < start || mo->it.start > end)
				continue;

			cancel_userptr(mo);
		}
	} else {
		struct interval_tree_node *it;

		it = interval_tree_iter_first(&mn->objects, start, end);
		while (it) {
			mo = container_of(it, struct i915_mmu_object, it);
			start = cancel_userptr(mo);
			it = interval_tree_iter_next(it, start, end);
		}
	}
	spin_unlock(&mn->lock);
}
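/*
 * Note on the tree walk above: cancel_userptr() returns the end of the
 * cancelled object's range, which is fed back in as the new start so that
 * interval_tree_iter_next() resumes past the object just cancelled.
 */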
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};
static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;
	INIT_LIST_HEAD(&mn->linear);
	mn->has_linear = false;

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}
static int
i915_mmu_notifier_add(struct drm_device *dev,
		      struct i915_mmu_notifier *mn,
		      struct i915_mmu_object *mo)
{
	struct interval_tree_node *it;
	int ret = 0;

	/* By this point we have already done a lot of expensive setup that
	 * we do not want to repeat just because the caller (e.g. X) has a
	 * signal pending (and partly because of that expensive setup, X
	 * using an interrupt timer is likely to get stuck in an EINTR loop).
	 */
	mutex_lock(&dev->struct_mutex);

	/* Make sure we drop the final active reference (and thereby
	 * remove the objects from the interval tree) before we do
	 * the check for overlapping objects.
	 */
	i915_gem_retire_requests(dev);

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects,
				      mo->it.start, mo->it.last);
	if (it) {
		struct drm_i915_gem_object *obj;

		/* We only need to check the first object in the range as it
		 * either has cancelled gup work queued and we need to
		 * return back to the user to give time for the gup-workers
		 * to flush their object references upon which the object will
		 * be removed from the interval-tree, or the range is
		 * still in use by another client and the overlap is invalid.
		 *
		 * If we do have an overlap, we cannot use the interval tree
		 * for fast range invalidation.
		 */
		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!obj->userptr.workers)
			mn->has_linear = mo->is_linear = true;
		else
			ret = -EAGAIN;
	} else
		interval_tree_insert(&mo->it, &mn->objects);

	if (ret == 0)
		list_add(&mo->link, &mn->linear);

	spin_unlock(&mn->lock);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
{
	struct i915_mmu_object *mo;

	list_for_each_entry(mo, &mn->linear, link)
		if (mo->is_linear)
			return true;

	return false;
}
static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
		      struct i915_mmu_object *mo)
{
	spin_lock(&mn->lock);
	list_del(&mo->link);
	if (mo->is_linear)
		mn->has_linear = i915_mmu_notifier_has_linear(mn);
	else
		interval_tree_remove(&mo->it, &mn->objects);
	spin_unlock(&mn->lock);
}
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	i915_mmu_notifier_del(mo->mn, mo);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&to_i915(mm->dev)->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&to_i915(mm->dev)->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;
	int ret;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = mo->it.start + obj->base.size - 1;
	mo->obj = obj;
	INIT_WORK(&mo->work, __cancel_userptr__worker);

	ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
	if (ret) {
		kfree(mo);
		return ret;
	}

	obj->userptr.mmu_object = mo;
	return 0;
}
static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	kfree(mn);
}
#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}
static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->dev = obj->base.dev;

		mm->mm = current->mm;
		atomic_inc(&current->mm->mm_count);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}
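/*
 * Note that only mm_count is raised above (keeping the mm_struct itself
 * alive for the mmdrop() in the free worker), not mm_users: the address
 * space may still be torn down underneath us, and detecting that is left
 * to the mmu_notifier, as described in i915_gem_userptr_ioctl().
 */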
static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&to_i915(mm->dev)->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	schedule_work(&mm->work);
}
static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};
#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}
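/*
 * Presumably the swiotlb branch above builds one sg entry per page so that
 * bounce buffering never has to map an oversized segment; letting
 * sg_alloc_table_from_pages() coalesce contiguous pages could otherwise
 * produce segments larger than swiotlb can handle.
 */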
static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	int ret;

	ret = st_set_pages(&obj->pages, pvec, num_pages);
	if (ret)
		return ret;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		sg_free_table(obj->pages);
		kfree(obj->pages);
		obj->pages = NULL;
	}

	return ret;
}
static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value || !work_pending(&obj->userptr.mmu_object->work))
		obj->userptr.mmu_object->active = value;
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}
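/*
 * Lifecycle of the active flag, as used above and below: get_pages sets it
 * (under the notifier lock) before pinning, cancel_userptr clears it when
 * scheduling the cancellation worker, and it is dropped again on worker
 * error or when the pages are finally released in put_pages.
 */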
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kmalloc(npages*sizeof(struct page *),
		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (pvec == NULL)
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;

		if (!obj->userptr.read_only)
			flags |= FOLL_WRITE;

		down_read(&mm->mmap_sem);
		while (pinned < npages) {
			ret = get_user_pages(work->task, mm,
					     obj->userptr.ptr + pinned * PAGE_SIZE,
					     npages - pinned, flags,
					     pvec + pinned, NULL);
			if (ret < 0)
				break;

			pinned += ret;
		}
		up_read(&mm->mmap_sem);
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work == &work->work) {
		if (pinned == npages) {
			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
			if (ret == 0) {
				list_add_tail(&obj->global_list,
					      &to_i915(dev)->mm.unbound_list);
				obj->get_page.sg = obj->pages->sgl;
				obj->get_page.last = 0;
				pinned = 0;
			}
		}
		obj->userptr.work = ERR_PTR(ret);
		if (ret)
			__i915_gem_userptr_set_active(obj, false);
	}

	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}
static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
				      bool *active)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
		return -EAGAIN;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	obj->userptr.work = &work->work;
	obj->userptr.workers++;

	work->obj = obj;
	drm_gem_object_reference(&obj->base);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	schedule_work(&work->work);

	*active = true;
	return 0;
}
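/*
 * For reference, obj->userptr.work is NULL when idle, points at the queued
 * work item while a worker is outstanding, and is left as ERR_PTR(ret) by
 * the worker (NULL on success); i915_gem_userptr_get_pages() consumes an
 * error status and resets it to NULL on the next call.
 */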
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;
	bool active;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */
	if (IS_ERR(obj->userptr.work)) {
		/* active flag will have been dropped already by the worker */
		ret = PTR_ERR(obj->userptr.work);
		obj->userptr.work = NULL;
		return ret;
	}
	if (obj->userptr.work)
		/* active flag should still be held for the pending work */
		return -EAGAIN;

	/* Let the mmu-notifier know that we have begun and need cancellation */
	ret = __i915_gem_userptr_set_active(obj, true);
	if (ret)
		return ret;

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm->mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL) {
				__i915_gem_userptr_set_active(obj, false);
				return -ENOMEM;
			}
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}

	active = false;
	if (pinned < 0)
		ret = pinned, pinned = 0;
	else if (pinned < num_pages)
		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
	else
		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
	if (ret) {
		__i915_gem_userptr_set_active(obj, active);
		release_pages(pvec, pinned, 0);
	}
	drm_free_large(pvec);
	return ret;
}
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	i915_gem_gtt_finish_object(obj);

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.release = i915_gem_userptr_release,
};
/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
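/*
 * For illustration, a minimal userspace sketch of calling this ioctl
 * (assuming the struct drm_i915_gem_userptr layout and the
 * DRM_IOCTL_I915_GEM_USERPTR request from include/uapi/drm/i915_drm.h):
 *
 *	struct drm_i915_gem_userptr arg = { 0 };
 *	void *ptr;
 *
 *	posix_memalign(&ptr, 4096, size);	// page-aligned, page-sized
 *	arg.user_ptr = (uintptr_t)ptr;
 *	arg.user_size = size;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		;	// arg.handle now names a GEM object backed by ptr
 *
 * Any synchronisation with outstanding GPU access remains the caller's
 * responsibility, as described in the comment above the ioctl.
 */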
int
i915_gem_init_userptr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);
	return 0;
}