// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "i915_selftest.h"

#include "gt/intel_engine_pm.h"
#include "selftests/igt_flush_test.h"

static u64 read_reloc(const u32 *map, int x, const u64 mask)
{
	u64 reloc;

	memcpy(&reloc, &map[x], sizeof(reloc));
	return reloc & mask;
}
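
/*
 * __igt_gpu_reloc() emits three GPU relocations into the scratch object:
 * one 8-byte aligned, one unaligned, and one placed after the command page
 * has been padded out, which forces the relocation cache onto a fresh
 * batch. The request is then waited upon and each slot is checked for the
 * value the GPU was asked to write.
 */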

static int __igt_gpu_reloc(struct i915_execbuffer *eb,
			   struct drm_i915_gem_object *obj)
{
	const unsigned int offsets[] = { 8, 3, 0 };
	const u64 mask =
		GENMASK_ULL(eb->reloc_cache.use_64bit_reloc ? 63 : 31, 0);
	const u32 *map = page_mask_bits(obj->mm.mapping);
	struct i915_request *rq;
	struct i915_vma *vma;
	int err;
	int i;

	vma = i915_vma_instance(obj, eb->context->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_gem_object_lock(obj, &eb->ww);
	if (err)
		return err;

	err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		return err;

	/* 8-Byte aligned */
	err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
	if (err <= 0)
		goto reloc_err;

	/* !8-Byte aligned */
	err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
	if (err <= 0)
		goto reloc_err;

	/* Skip to the end of the cmd page */
	i = PAGE_SIZE / sizeof(u32) - 1;
	i -= eb->reloc_cache.rq_size;
	memset32(eb->reloc_cache.rq_cmd + eb->reloc_cache.rq_size,
		 MI_NOOP, i);
	eb->reloc_cache.rq_size += i;

	/* Force next batch */
	err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
	if (err <= 0)
		goto reloc_err;

	GEM_BUG_ON(!eb->reloc_cache.rq);
	rq = i915_request_get(eb->reloc_cache.rq);
	reloc_gpu_flush(eb, &eb->reloc_cache);
	GEM_BUG_ON(eb->reloc_cache.rq);

	err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, HZ / 2);
	if (err) {
		intel_gt_set_wedged(eb->engine->gt);
		goto put_rq;
	}

	if (!i915_request_completed(rq)) {
		pr_err("%s: did not wait for relocations!\n", eb->engine->name);
		err = -EINVAL;
		goto put_rq;
	}

	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 reloc = read_reloc(map, offsets[i], mask);

		if (reloc != i) {
			pr_err("%s[%d]: map[%d] %llx != %x\n",
			       eb->engine->name, i, offsets[i], reloc, i);
			err = -EINVAL;
		}
	}
	if (err)
		igt_hexdump(map, 4096);

put_rq:
	i915_request_put(rq);
unpin_vma:
	i915_vma_unpin(vma);
	return err;

reloc_err:
	if (!err)
		err = -EIO;
	goto unpin_vma;
}
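
/*
 * igt_gpu_reloc() runs __igt_gpu_reloc() against every user-visible engine,
 * poisoning the scratch page before each pass and restarting the ww
 * transaction whenever it backs off with -EDEADLK.
 */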

static int igt_gpu_reloc(void *arg)
{
	struct i915_execbuffer eb;
	struct drm_i915_gem_object *scratch;
	int err = 0;
	u32 *map;

	eb.i915 = arg;

	scratch = i915_gem_object_create_internal(eb.i915, 4096);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	map = i915_gem_object_pin_map(scratch, I915_MAP_WC);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto err_scratch;
	}

	intel_gt_pm_get(&eb.i915->gt);

	for_each_uabi_engine(eb.engine, eb.i915) {
		reloc_cache_init(&eb.reloc_cache, eb.i915);
		memset(map, POISON_INUSE, 4096);

		intel_engine_pm_get(eb.engine);
		eb.context = intel_context_create(eb.engine);
		if (IS_ERR(eb.context)) {
			err = PTR_ERR(eb.context);
			goto err_pm;
		}
		eb.reloc_pool = NULL;
		eb.reloc_context = NULL;

		i915_gem_ww_ctx_init(&eb.ww, false);
retry:
		err = intel_context_pin_ww(eb.context, &eb.ww);
		if (!err) {
			err = __igt_gpu_reloc(&eb, scratch);

			intel_context_unpin(eb.context);
		}
		/* A ww deadlock means backing off and replaying the transaction */
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&eb.ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&eb.ww);

		if (eb.reloc_pool)
			intel_gt_buffer_pool_put(eb.reloc_pool);
		if (eb.reloc_context)
			intel_context_put(eb.reloc_context);

		intel_context_put(eb.context);
err_pm:
		intel_engine_pm_put(eb.engine);
		if (err)
			break;
	}

	if (igt_flush_test(eb.i915))
		err = -EIO;

	intel_gt_pm_put(&eb.i915->gt);
err_scratch:
	i915_gem_object_put(scratch);
	return err;
}

int i915_gem_execbuffer_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gpu_reloc),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}