/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>
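/*
 * Retirement callback for a VMA's per-engine request trackers: clear the
 * engine's active bit, move the VMA (and, once the last engine retires,
 * the object) back onto the inactive/bound lists, and drop the active
 * reference on the object when the GPU is no longer using it.
 */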
static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}
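/*
 * Allocate and initialise a new VMA for (obj, vm, view): set up the request
 * trackers, derive the size and fence constraints from the view and tiling,
 * and link the VMA into the object's vma_list/vma_tree and the vm's
 * unbound_list.
 */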
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);

	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	vma->vm = vm;
	vma->obj = obj;
	vma->resv = obj->resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size >= obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);
	list_add(&vma->vm_link, &vm->unbound_list);

	return vma;

err_vma:
	kmem_cache_free(vm->i915->vmas, vma);
	return ERR_PTR(-E2BIG);
}
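/*
 * Walk the object's rb-tree of VMAs, ordered by i915_vma_compare() over
 * (vm, view), and return the matching VMA, or NULL if none exists yet.
 */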
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma_tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}
/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	vma = vma_lookup(obj, vm, view);
	if (!vma)
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
	return vma;
}
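/*
 * Illustrative sketch of the usual caller pattern (compare, for example,
 * i915_gem_object_ggtt_pin()); roughly:
 *
 *	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, view);
 *	if (IS_ERR(vma))
 *		return vma;
 *
 *	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
 *	if (ret)
 *		return ERR_PTR(ret);
 */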
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_WARN_ON(range_overflows(vma->node.start,
					vma->node.size,
					vma->vm->total)))
		return -ENODEV;

	if (GEM_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;

	set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);

	return 0;
}
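/*
 * Map the VMA through the GGTT aperture as write-combining I/O memory. The
 * mapping is cached in vma->iomap until the VMA is unbound, and the implicit
 * pin taken here must be released with i915_vma_unpin_iomap().
 */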
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
		return IO_ERR_PTR(-ENODEV);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
			return IO_ERR_PTR(-ENOMEM);

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);
	return ptr;
}
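/*
 * Convenience helper used on teardown paths: fetch and clear *p_vma, unpin
 * and close the VMA, then release the reference held on the backing object
 * (deferring the final free while the object is still active).
 */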
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}
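/*
 * Recompute whether the bound GGTT VMA lies within the mappable aperture and
 * satisfies the fence size/alignment constraints, updating the
 * I915_VMA_CAN_FENCE flag accordingly.
 */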
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}
static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}
/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -E2BIG;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_unpin;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, obj->cache_level,
					   flags);
		if (ret)
			goto err_unpin;
	} else {
		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, obj->cache_level,
					  start, end, flags);
		if (ret)
			goto err_unpin;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;

	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}
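/*
 * Remove the VMA's node from the GTT drm_mm, move it back onto the unbound
 * lists and drop the pages pin taken by i915_vma_insert().
 */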
static void
i915_vma_remove(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}
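/*
 * Slowpath behind the i915_vma_pin() wrapper: insert the VMA into its
 * address space if it is not yet bound, bind the requested GLOBAL/LOCAL
 * mappings and refresh the map-and-fenceable status.
 */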
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err_remove;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		GEM_BUG_ON(vma->pages);
		i915_vma_remove(vma);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}
static void i915_vma_destroy(struct i915_vma *vma)
{
	int i;

	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(!i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->fence);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
	GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));

	list_del(&vma->obj_link);
	list_del(&vma->vm_link);

	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}
void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}
static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}
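/*
 * Tear down a VMA: wait for outstanding GPU activity, release the fence and
 * any iomap, unbind the PTEs, free any view-specific sg_table, remove the
 * node from the GTT and, if the VMA has been closed, destroy it.
 */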
int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				break;
		}

		if (!ret) {
			ret = i915_gem_active_retire(&vma->last_fence,
						     &vma->vm->i915->drm.struct_mutex);
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}
	GEM_BUG_ON(vma->fence);

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	if (vma->pages != obj->mm.pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	i915_vma_remove(vma);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif