/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
{
	switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
	case MUTEX_TRYLOCK_RECURSIVE:
		/* Already held by this task; the caller must not unlock. */
		*unlock = false;
		return true;

	case MUTEX_TRYLOCK_FAILED:
		/* Spin briefly for the lock before giving up. */
		*unlock = false;
		preempt_disable();
		do {
			cpu_relax();
			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
				*unlock = true;
				break;
			}
		} while (!need_resched());
		preempt_enable();
		return *unlock;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;
	}

	BUG();
}
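/*
 * Illustrative locking pattern (a sketch of how this file uses the pair
 * above, not an additional code path): every caller threads the @unlock
 * flag from shrinker_lock() through to shrinker_unlock(), so a recursive
 * acquisition (struct_mutex already held by the current task) is never
 * double-unlocked:
 *
 *	bool unlock;
 *
 *	if (!shrinker_lock(dev_priv, &unlock))
 *		return 0;
 *	// ... reclaim work under struct_mutex ...
 *	shrinker_unlock(dev_priv, unlock);
 */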
static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
{
	if (!unlock)
		return;

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		/* Only GGTT vma may be permanently pinned, and are always
		 * at the start of the list. We can stop hunting as soon
		 * as we see a ppGTT vma.
		 */
		if (!i915_vma_is_ggtt(vma))
			break;

		if (i915_vma_is_pinned(vma))
			return true;
	}

	return false;
}
static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	if (!obj->mm.pages)
		return false;

	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/* Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
		return false;

	if (any_vma_pinned(obj))
		return false;

	/* We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
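/*
 * Worked example for the pin-count check above (illustrative numbers, not
 * taken from a real trace): an object bound once into the GGTT has
 * bind_count == 1 and, from that binding alone, pages_pin_count == 1, so
 * unbinding would release the pages and can_release_pages() may say true.
 * If a second pin is held from elsewhere (e.g. a kernel vmap via
 * i915_gem_object_pin_map()), pages_pin_count == 2 > bind_count == 1 and
 * the shrinker must skip the object: unbinding could not drop that last
 * pin anyway.
 */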
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
	return !READ_ONCE(obj->mm.pages);
}
/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core might have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;
	unsigned long scanned = 0;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;
	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);

	/*
	 * Unbinding of objects will require HW access; Let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;
	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at the time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count of 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       global_link))) {
			list_move_tail(&obj->global_link, &still_in_list);
			if (!obj->mm.pages) {
				list_del_init(&obj->global_link);
				continue;
			}

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->mm.madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(flags & I915_SHRINK_ACTIVE) &&
			    (i915_gem_object_is_active(obj) ||
			     i915_gem_object_is_framebuffer(obj)))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (unsafe_drop_pages(obj)) {
				/* May arrive from get_pages on another bo */
				mutex_lock_nested(&obj->mm.lock,
						  I915_MM_SHRINKER);
				if (!obj->mm.pages) {
					__i915_gem_object_invalidate(obj);
					list_del_init(&obj->global_link);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
				scanned += obj->base.size >> PAGE_SHIFT;
			}
		}
		list_splice_tail(&still_in_list, phase->list);
	}
	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);

	shrinker_unlock(dev_priv, unlock);

	if (nr_scanned)
		*nr_scanned += scanned;
	return count;
}
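/*
 * Illustrative use of i915_gem_shrink() (a sketch mirroring
 * i915_gem_shrinker_scan() below, not an additional code path; the
 * 128-page target is arbitrary): try the cheap caches first and only
 * widen the selection if the target was missed:
 *
 *	unsigned long nr = 128, scanned = 0, freed;
 *
 *	freed = i915_gem_shrink(dev_priv, nr, &scanned,
 *				I915_SHRINK_BOUND |
 *				I915_SHRINK_UNBOUND |
 *				I915_SHRINK_PURGEABLE);
 *	if (freed < nr)
 *		freed += i915_gem_shrink(dev_priv, nr - scanned, &scanned,
 *					 I915_SHRINK_BOUND |
 *					 I915_SHRINK_UNBOUND);
 */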
/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	unsigned long freed;

	intel_runtime_pm_get(dev_priv);
	freed = i915_gem_shrink(dev_priv, -1UL, NULL,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_ACTIVE);
	intel_runtime_pm_put(dev_priv);

	return freed;
}
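/*
 * Note (descriptive, matching the code above): unlike i915_gem_shrink(),
 * which only trims bound objects when the device already happens to be
 * awake, i915_gem_shrink_all() takes a runtime-PM wakeref unconditionally
 * so that even I915_SHRINK_ACTIVE objects can be unbound. The OOM notifier
 * below relies on exactly this.
 */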
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_i915_gem_object *obj;
	unsigned long count = 0;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;

	i915_gem_retire_requests(dev_priv);

	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
		if (can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	shrinker_unlock(dev_priv, unlock);

	return count;
}
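/*
 * Note (descriptive): ->count_objects only reports an estimate of how many
 * pages could be reclaimed; the core mm uses it to size the sc->nr_to_scan
 * batches later passed to ->scan_objects. Over-counting here merely causes
 * extra scan calls; it does not free anything by itself.
 */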
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;
	bool unlock;

	sc->nr_scanned = 0;

	if (!shrinker_lock(dev_priv, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - sc->nr_scanned,
					 &sc->nr_scanned,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	if (freed < sc->nr_to_scan && current_is_kswapd()) {
		intel_runtime_pm_get(dev_priv);
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - sc->nr_scanned,
					 &sc->nr_scanned,
					 I915_SHRINK_ACTIVE |
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
		intel_runtime_pm_put(dev_priv);
	}

	shrinker_unlock(dev_priv, unlock);

	return sc->nr_scanned ? freed : SHRINK_STOP;
}
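/*
 * The scan callback above escalates in three passes: purgeable objects
 * first (cheapest, contents may be discarded), then any idle bound or
 * unbound object, and finally - only for kswapd, which can afford the
 * latency of waking the device - active objects as well. Each pass
 * subtracts sc->nr_scanned from the target so the next pass only covers
 * the remainder.
 */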
static bool
shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
			      int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

	do {
		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
		    shrinker_lock(dev_priv, unlock))
			break;

		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;

		if (time_after(jiffies, timeout)) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	} while (1);

	return true;
}
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;
	bool unlock;

	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
		return NOTIFY_DONE;

	freed_pages = i915_gem_shrink_all(dev_priv);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = unevictable = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			unbound += obj->base.size >> PAGE_SHIFT;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			bound += obj->base.size >> PAGE_SHIFT;
	}

	shrinker_unlock(dev_priv, unlock);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned.\n",
			freed_pages, unevictable);
	if (unbound || bound)
		pr_err("%lu and %lu pages still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	bool unlock;
	int ret;

	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
		return NOTIFY_DONE;

	/* Force everything onto the inactive lists */
	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
	if (ret)
		goto out;

	intel_runtime_pm_get(dev_priv);
	freed_pages += i915_gem_shrink(dev_priv, -1UL, NULL,
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_ACTIVE |
				       I915_SHRINK_VMAPS);
	intel_runtime_pm_put(dev_priv);

	/* We also want to clear any cached iomaps as they wrap vmap */
	list_for_each_entry_safe(vma, next,
				 &dev_priv->ggtt.base.inactive_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;
		if (vma->iomap && i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}

out:
	shrinker_unlock(dev_priv, unlock);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}
/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}
/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);
}