// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"

#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gpu_commands.h"
#include "gem/i915_gem_lmem.h"

#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "selftests/i915_random.h"
#include "huge_gem_object.h"
#include "mock_context.h"

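/*
 * Fill a huge object with a known value using the blitter on the engine's
 * kernel context, then read it back through a CPU (WB) mapping to check both
 * the blt result and the clflushing required on !llc platforms.
 */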
static int __igt_client_fill(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct drm_i915_gem_object *obj;
	struct rnd_state prng;
	IGT_TIMEOUT(end);
	u32 *vaddr;
	int err = 0;

	prandom_seed_state(&prng, i915_selftest.random_seed);

	intel_engine_pm_get(engine);
	do {
		const u32 max_block_size = S16_MAX * PAGE_SIZE;
		u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
		u32 phys_sz = sz % (max_block_size + 1);
		u32 val = prandom_u32_state(&prng);
		u32 i;

		sz = round_up(sz, PAGE_SIZE);
		phys_sz = round_up(phys_sz, PAGE_SIZE);

		pr_debug("%s with phys_sz=%x, sz=%x, val=%x\n", __func__,
			 phys_sz, sz, val);

		obj = huge_gem_object(engine->i915, phys_sz, sz);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_flush;
		}

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put;
		}

		/*
		 * XXX: The goal is to move this to get_pages, so try to dirty
		 * the CPU cache first to check that we do the required clflush
		 * before scheduling the blt for !llc platforms. This matches
		 * some version of reality where at get_pages the pages
		 * themselves may not yet be coherent with the GPU (swap-in).
		 * If we are missing the flush then we should see the stale
		 * cache values after we do the set_to_cpu_domain and pick it
		 * up as a test failure.
		 */
		memset32(vaddr, val ^ 0xdeadbeaf,
			 huge_gem_object_phys_size(obj) / sizeof(u32));

		if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
			obj->cache_dirty = true;

		err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
						       &obj->mm.page_sizes,
						       val);
		if (err)
			goto err_unpin;

		i915_gem_object_lock(obj, NULL);
		err = i915_gem_object_set_to_cpu_domain(obj, false);
		i915_gem_object_unlock(obj);
		if (err)
			goto err_unpin;

		for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
			if (vaddr[i] != val) {
				pr_err("vaddr[%u]=%x, expected=%x\n", i,
				       vaddr[i], val);
				err = -EINVAL;
				goto err_unpin;
			}
		}

		i915_gem_object_unpin_map(obj);
		i915_gem_object_put(obj);
	} while (!time_after(jiffies, end));

	goto err_flush;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
err_flush:
	if (err == -ENOMEM)
		err = 0;

	intel_engine_pm_put(engine);
	return err;
}

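/* Run the fill check on every copy engine exposed to userspace. */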
static int igt_client_fill(void *arg)
{
	int inst = 0;

	do {
		struct intel_engine_cs *engine;
		int err;

		engine = intel_engine_lookup_user(arg,
						  I915_ENGINE_CLASS_COPY,
						  inst++);
		if (!engine)
			return 0;

		err = __igt_client_fill(engine);
		if (err)
			return err;
	} while (1);
}

struct blit_buffer {
	struct i915_vma *vma;
	u32 start_val;
	u32 tiling;
};

struct tiled_blits {
	struct intel_context *ce;
	struct blit_buffer buffers[3];
	struct blit_buffer scratch;
	struct i915_vma *batch;
	u64 hole;
	u32 width;
	u32 height;
};

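/*
 * Emit a batch that selects the requested Y-tiling modes via BCS_SWCTRL and
 * then copies src to dst with XY_SRC_COPY, using 64b addresses on gen8+.
 */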
static int prepare_blit(const struct tiled_blits *t,
			struct blit_buffer *dst,
			struct blit_buffer *src,
			struct drm_i915_gem_object *batch)
{
	const int gen = INTEL_GEN(to_i915(batch->base.dev));
	bool use_64b_reloc = gen >= 8;
	u32 src_pitch, dst_pitch;
	u32 cmd, *cs;

	cs = i915_gem_object_pin_map(batch, I915_MAP_WC);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
	cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
	if (src->tiling == I915_TILING_Y)
		cmd |= BCS_SRC_Y;
	if (dst->tiling == I915_TILING_Y)
		cmd |= BCS_DST_Y;
	*cs++ = cmd;

	cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2);
	if (gen >= 8)
		cmd += 2;

	src_pitch = t->width * 4;
	if (src->tiling) {
		cmd |= XY_SRC_COPY_BLT_SRC_TILED;
		src_pitch /= 4;
	}

	dst_pitch = t->width * 4;
	if (dst->tiling) {
		cmd |= XY_SRC_COPY_BLT_DST_TILED;
		dst_pitch /= 4;
	}

	*cs++ = cmd;
	*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
	*cs++ = 0;
	*cs++ = t->height << 16 | t->width;
	*cs++ = lower_32_bits(dst->vma->node.start);
	if (use_64b_reloc)
		*cs++ = upper_32_bits(dst->vma->node.start);
	*cs++ = 0;
	*cs++ = src_pitch;
	*cs++ = lower_32_bits(src->vma->node.start);
	if (use_64b_reloc)
		*cs++ = upper_32_bits(src->vma->node.start);

	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch);
	i915_gem_object_unpin_map(batch);

	return 0;
}

static void tiled_blits_destroy_buffers(struct tiled_blits *t)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(t->buffers); i++)
		i915_vma_put(t->buffers[i].vma);

	i915_vma_put(t->scratch.vma);
	i915_vma_put(t->batch);
}

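/* Back a vma in the context's vm with either lmem or shmem pages. */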
static struct i915_vma *
__create_vma(struct tiled_blits *t, size_t size, bool lmem)
{
	struct drm_i915_private *i915 = t->ce->vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	if (lmem)
		obj = i915_gem_object_create_lmem(i915, size, 0);
	else
		obj = i915_gem_object_create_shmem(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, t->ce->vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}

static struct i915_vma *create_vma(struct tiled_blits *t, bool lmem)
{
	return __create_vma(t, PAGE_ALIGN(t->width * t->height * 4), lmem);
}

static int tiled_blits_create_buffers(struct tiled_blits *t,
				      int width, int height,
				      struct rnd_state *prng)
{
	struct drm_i915_private *i915 = t->ce->engine->i915;
	int i;

	t->width = width;
	t->height = height;

	t->batch = __create_vma(t, PAGE_SIZE, false);
	if (IS_ERR(t->batch))
		return PTR_ERR(t->batch);

	t->scratch.vma = create_vma(t, false);
	if (IS_ERR(t->scratch.vma)) {
		i915_vma_put(t->batch);
		return PTR_ERR(t->scratch.vma);
	}

	for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
		struct i915_vma *vma;

		vma = create_vma(t, HAS_LMEM(i915) && i % 2);
		if (IS_ERR(vma)) {
			tiled_blits_destroy_buffers(t);
			return PTR_ERR(vma);
		}

		t->buffers[i].vma = vma;
		t->buffers[i].tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, prng);
	}

	return 0;
}

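/* Fill the scratch buffer with ascending values, starting from val. */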
static void fill_scratch(struct tiled_blits *t, u32 *vaddr, u32 val)
{
	int i;

	t->scratch.start_val = val;
	for (i = 0; i < t->width * t->height; i++)
		vaddr[i] = val++;

	i915_gem_object_flush_map(t->scratch.vma->obj);
}

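/*
 * Convert a linear byte offset into the corresponding offset within an X- or
 * Y-tiled surface, applying the platform's bit-6 swizzle, so that the GPU's
 * tiled writes can be verified through a linear CPU mapping.
 */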
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

static u64 tiled_offset(const struct intel_gt *gt,
			u64 v,
			unsigned int stride,
			unsigned int tiling)
{
	unsigned int swizzle;
	u64 x, y;

	if (tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, stride, &x);

	if (tiling == I915_TILING_X) {
		v = div64_u64_rem(y, 8, &y) * stride * 8;
		v += y * 512;
		v += div64_u64_rem(x, 512, &x) << 12;
		v += x;

		swizzle = gt->ggtt->bit_6_swizzle_x;
	} else {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v = div64_u64_rem(y, 32, &y) * stride * 32;
		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;

		swizzle = gt->ggtt->bit_6_swizzle_y;
	}

	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

static const char *repr_tiling(int tiling)
{
	switch (tiling) {
	case I915_TILING_NONE: return "linear";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	default: return "unknown";
	}
}

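/*
 * Spot-check one random pixel: the first dword must hold the buffer's
 * start_val and the tiled offset of pixel p must hold start_val + p.
 */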
static int verify_buffer(const struct tiled_blits *t,
			 struct blit_buffer *buf,
			 struct rnd_state *prng)
{
	u32 *vaddr;
	int ret = 0;
	int x, y, p;

	x = i915_prandom_u32_max_state(t->width, prng);
	y = i915_prandom_u32_max_state(t->height, prng);
	p = y * t->width + x;

	vaddr = i915_gem_object_pin_map(buf->vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (vaddr[0] != buf->start_val) {
		ret = -EINVAL;
	} else {
		u64 v = tiled_offset(buf->vma->vm->gt,
				     p * 4, t->width * 4,
				     buf->tiling);

		if (vaddr[v / sizeof(*vaddr)] != buf->start_val + p)
			ret = -EINVAL;
	}
	if (ret) {
		pr_err("Invalid %s tiling detected at (%d, %d), start_val %x\n",
		       repr_tiling(buf->tiling),
		       x, y, buf->start_val);
		igt_hexdump(vaddr, 4096);
	}

	i915_gem_object_unpin_map(buf->vma->obj);
	return ret;
}

static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

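/*
 * Pin the vma at a fixed GTT offset, unbinding it first if it is already
 * bound somewhere else.
 */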
static int pin_buffer(struct i915_vma *vma, u64 addr)
{
	int err;

	if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
		err = i915_vma_unbind(vma);
		if (err)
			return err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED | addr);
	return err;
}

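/*
 * Pin src and dst at the requested offsets, run the copy batch and wait for
 * it; on completion dst inherits src's expected fill pattern.
 */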
static int
tiled_blit(struct tiled_blits *t,
	   struct blit_buffer *dst, u64 dst_addr,
	   struct blit_buffer *src, u64 src_addr)
{
	struct i915_request *rq;
	int err;

	err = pin_buffer(src->vma, src_addr);
	if (err) {
		pr_err("Cannot pin src @ %llx\n", src_addr);
		return err;
	}

	err = pin_buffer(dst->vma, dst_addr);
	if (err) {
		pr_err("Cannot pin dst @ %llx\n", dst_addr);
		goto err_src;
	}

	err = i915_vma_pin(t->batch, 0, 0, PIN_USER | PIN_HIGH);
	if (err) {
		pr_err("cannot pin batch\n");
		goto err_dst;
	}

	err = prepare_blit(t, dst, src, t->batch->obj);
	if (err)
		goto err_bb;

	rq = intel_context_create_request(t->ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_bb;
	}

	err = move_to_active(t->batch, rq, 0);
	if (!err)
		err = move_to_active(src->vma, rq, 0);
	if (!err)
		err = move_to_active(dst->vma, rq, 0);
	if (!err)
		err = rq->engine->emit_bb_start(rq,
						t->batch->node.start,
						t->batch->node.size,
						0);
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 2) < 0)
		err = -ETIME;
	i915_request_put(rq);

	dst->start_val = src->start_val;
err_bb:
	i915_vma_unpin(t->batch);
err_dst:
	i915_vma_unpin(dst->vma);
err_src:
	i915_vma_unpin(src->vma);

	return err;
}

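/*
 * Create the test context and reserve a hole in its GTT so that the buffers
 * can later be placed at explicit, overlapping offsets.
 */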
static struct tiled_blits *
tiled_blits_create(struct intel_engine_cs *engine, struct rnd_state *prng)
{
	struct drm_mm_node hole;
	struct tiled_blits *t;
	u64 hole_size;
	int err;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	t->ce = intel_context_create(engine);
	if (IS_ERR(t->ce)) {
		err = PTR_ERR(t->ce);
		goto err_free;
	}

	hole_size = 2 * PAGE_ALIGN(WIDTH * HEIGHT * 4);
	hole_size *= 2; /* room to maneuver */
	hole_size += 2 * I915_GTT_MIN_ALIGNMENT;

	mutex_lock(&t->ce->vm->mutex);
	memset(&hole, 0, sizeof(hole));
	err = drm_mm_insert_node_in_range(&t->ce->vm->mm, &hole,
					  hole_size, 0, I915_COLOR_UNEVICTABLE,
					  0, U64_MAX,
					  DRM_MM_INSERT_BEST);
	if (!err)
		drm_mm_remove_node(&hole);
	mutex_unlock(&t->ce->vm->mutex);
	if (err)
		goto err_put;

	t->hole = hole.start + I915_GTT_MIN_ALIGNMENT;
	pr_info("Using hole at %llx\n", t->hole);

	err = tiled_blits_create_buffers(t, WIDTH, HEIGHT, prng);
	if (err)
		goto err_put;

	return t;

err_put:
	intel_context_put(t->ce);
err_free:
	kfree(t);
	return ERR_PTR(err);
}

static void tiled_blits_destroy(struct tiled_blits *t)
{
	tiled_blits_destroy_buffers(t);

	intel_context_put(t->ce);
	kfree(t);
}

static int tiled_blits_prepare(struct tiled_blits *t,
			       struct rnd_state *prng)
{
	u64 offset = PAGE_ALIGN(t->width * t->height * 4);
	u32 *map;
	int err = 0;
	int i;

	map = i915_gem_object_pin_map(t->scratch.vma->obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Use scratch to fill objects */
	for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
		fill_scratch(t, map, prandom_u32_state(prng));
		GEM_BUG_ON(verify_buffer(t, &t->scratch, prng));

		err = tiled_blit(t,
				 &t->buffers[i], t->hole + offset,
				 &t->scratch, t->hole);
		if (err == 0)
			err = verify_buffer(t, &t->buffers[i], prng);
		if (err) {
			pr_err("Failed to create buffer %d\n", i);
			break;
		}
	}

	i915_gem_object_unpin_map(t->scratch.vma->obj);
	return err;
}

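/*
 * Bounce the buffers between overlapping GTT offsets to check that the tiled
 * contents are position invariant across rebinding.
 */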
static int tiled_blits_bounce(struct tiled_blits *t, struct rnd_state *prng)
{
	u64 offset =
		round_up(t->width * t->height * 4, 2 * I915_GTT_MIN_ALIGNMENT);
	int err;

	/* We want to check position invariant tiling across GTT eviction */

	err = tiled_blit(t,
			 &t->buffers[1], t->hole + offset / 2,
			 &t->buffers[0], t->hole + 2 * offset);
	if (err)
		return err;

	/* Reposition so that we overlap the old addresses, and slightly off */
	err = tiled_blit(t,
			 &t->buffers[2], t->hole + I915_GTT_MIN_ALIGNMENT,
			 &t->buffers[1], t->hole + 3 * offset / 2);
	if (err)
		return err;

	err = verify_buffer(t, &t->buffers[2], prng);
	return err;
}

static int __igt_client_tiled_blits(struct intel_engine_cs *engine,
				    struct rnd_state *prng)
{
	struct tiled_blits *t;
	int err;

	t = tiled_blits_create(engine, prng);
	if (IS_ERR(t))
		return PTR_ERR(t);

	err = tiled_blits_prepare(t, prng);
	if (err)
		goto out;

	err = tiled_blits_bounce(t, prng);
out:
	tiled_blits_destroy(t);
	return err;
}

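/*
 * Bit-17 swizzling depends on the physical page, which tiled_offset() cannot
 * model, so skip machines that use it.
 */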
static bool has_bit17_swizzle(int sw)
{
	return (sw == I915_BIT_6_SWIZZLE_9_10_17 ||
		sw == I915_BIT_6_SWIZZLE_9_17);
}

static bool bad_swizzling(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;

	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return true;

	if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) ||
	    has_bit17_swizzle(ggtt->bit_6_swizzle_y))
		return true;

	return false;
}

static int igt_client_tiled_blits(void *arg)
{
	struct drm_i915_private *i915 = arg;
	I915_RND_STATE(prng);
	int inst = 0;

	/* Test requires explicit BLT tiling controls */
	if (INTEL_GEN(i915) < 4)
		return 0;

	if (bad_swizzling(i915)) /* Requires sane (sub-page) swizzling */
		return 0;

	do {
		struct intel_engine_cs *engine;
		int err;

		engine = intel_engine_lookup_user(i915,
						  I915_ENGINE_CLASS_COPY,
						  inst++);
		if (!engine)
			return 0;

		err = __igt_client_tiled_blits(engine, &prng);
		if (err)
			return err;
	} while (1);
}

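/* Live selftest entry point for the client blt tests. */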
int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_client_fill),
		SUBTEST(igt_client_tiled_blits),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}