// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prime_numbers.h>
#include <linux/sort.h>

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_region.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_object_blt.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_memcpy.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"
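
/*
 * Unwind a list of test objects: unpin, release the backing store and drop
 * the references, then drain the freed-object worker so one subtest cannot
 * pollute the memory region seen by the next.
 */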
static void close_objects(struct intel_memory_region *mem,
			  struct list_head *objects)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		/* No polluting the memory region between tests */
		__i915_gem_object_put_pages(obj);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	i915_gem_drain_freed_objects(i915);
}
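
/*
 * igt_mock_fill() -- walk object sizes over the primes, allocating and
 * pinning until the mock region runs out of space, and flag any space that
 * was unexpectedly left over.
 */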
static int igt_mock_fill(void *arg)
{
	struct intel_memory_region *mem = arg;
	resource_size_t total = resource_size(&mem->region);
	resource_size_t page_size = mem->mm.chunk_size;
	resource_size_t rem = total;
	unsigned long max_pages = div64_u64(total, page_size);
	unsigned long page_num;
	LIST_HEAD(objects);
	int err = 0;

	for_each_prime_number_from(page_num, 1, max_pages) {
		resource_size_t size = page_num * page_size;
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mem, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);
		rem -= size;
	}

	/* Running out of space (-ENXIO) is the expected way to stop */
	if (err == -ENXIO && page_num * page_size <= rem) {
		pr_err("%s failed, space still left in region\n",
		       __func__);
		err = -EINVAL;
	} else if (err == -ENXIO || err == -ENOMEM) {
		err = 0;
	}

	close_objects(mem, &objects);
	return err;
}
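
/*
 * Helper to allocate and pin an object out of @mem, tracking it on @objects
 * so that close_objects() can unwind everything on the error paths.
 */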
static struct drm_i915_gem_object *
igt_object_create(struct intel_memory_region *mem,
		  struct list_head *objects,
		  u64 size, unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_region(mem, size, flags);
	if (IS_ERR(obj))
		return obj;

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	list_add(&obj->st_link, objects);
	return obj;
}
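
/* Inverse of igt_object_create(): unpin, release the pages and free @obj */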
static void igt_object_release(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj);
	list_del(&obj->st_link);
	i915_gem_object_put(obj);
}
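
/*
 * igt_mock_contiguous() -- check that I915_BO_ALLOC_CONTIGUOUS objects of
 * min, max and random sizes always occupy a single sg entry, and that once
 * the region is fragmented only small-enough contiguous requests succeed.
 */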
static int igt_mock_contiguous(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_gem_object *obj;
	unsigned long n_objects;
	LIST_HEAD(objects);
	LIST_HEAD(holes);
	I915_RND_STATE(prng);
	resource_size_t total;
	resource_size_t min;
	u64 target;
	int err = 0;

	total = resource_size(&mem->region);

	/* Min size */
	obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->mm.pages->nents != 1) {
		pr_err("%s min object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Max size */
	obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->mm.pages->nents != 1) {
		pr_err("%s max object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Internal fragmentation should not bleed into the object size */
	target = i915_prandom_u64_state(&prng);
	div64_u64_rem(target, total, &target);
	target = round_up(target, PAGE_SIZE);
	target = max_t(u64, PAGE_SIZE, target);

	obj = igt_object_create(mem, &objects, target,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->base.size != target) {
		pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
		       obj->base.size, target);
		err = -EINVAL;
		goto err_close_objects;
	}

	if (obj->mm.pages->nents != 1) {
		pr_err("%s object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Try to fragment the address space, such that half of it is free, but
	 * the max contiguous block size is SZ_64K.
	 */
	target = SZ_64K;
	n_objects = div64_u64(total, target);

	while (n_objects--) {
		struct list_head *list;

		if (n_objects % 2)
			list = &holes;
		else
			list = &objects;

		obj = igt_object_create(mem, list, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close_objects;
		}
	}

	/* Punch out every other block, leaving SZ_64K holes */
	close_objects(mem, &holes);

	min = target;
	target = total >> 1;

	/* Make sure we can still allocate all the fragmented space */
	obj = igt_object_create(mem, &objects, target, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Even though we have enough free space, we don't have a big enough
	 * contiguous block. Make sure that holds true.
	 */
	do {
		bool should_fail = target > min;

		obj = igt_object_create(mem, &objects, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (should_fail != IS_ERR(obj)) {
			pr_err("%s target allocation(%llx) mismatch\n",
			       __func__, target);
			err = -EINVAL;
			goto err_close_objects;
		}

		target >>= 1;
	} while (target >= mem->mm.chunk_size);

err_close_objects:
	list_splice_tail(&holes, &objects);
	close_objects(mem, &objects);
	return err;
}
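
/*
 * igt_mock_splintered_region() -- with a region size that is not a
 * power-of-two, plain allocations should still be able to consume the whole
 * region, while contiguous allocations are capped at rounddown_pow_of_two()
 * of the region size, i.e. the largest max_order block.
 */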
static int igt_mock_splintered_region(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	unsigned int expected_order;
	LIST_HEAD(objects);
	u64 size;
	int err = 0;

	/*
	 * Sanity check that we can still allocate everything even if
	 * mm.max_order does not cover mm.size, i.e. our starting address
	 * space size is not a power-of-two.
	 */
	size = (SZ_4G - 1) & PAGE_MASK;
	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (mem->mm.size != size) {
		pr_err("%s size mismatch(%llu != %llu)\n",
		       __func__, mem->mm.size, size);
		err = -EINVAL;
		goto out_put;
	}

	expected_order = get_order(rounddown_pow_of_two(size));
	if (mem->mm.max_order != expected_order) {
		pr_err("%s order mismatch(%u != %u)\n",
		       __func__, mem->mm.max_order, expected_order);
		err = -EINVAL;
		goto out_put;
	}

	obj = igt_object_create(mem, &objects, size, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_close;
	}

	close_objects(mem, &objects);

	/*
	 * While we should be able to allocate everything without any flag
	 * restrictions, with I915_BO_ALLOC_CONTIGUOUS we are actually limited
	 * to the largest power-of-two of the region size, i.e. max_order, due
	 * to the inner workings of the buddy allocator. So make sure that
	 * does indeed hold true.
	 */
	obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
	if (!IS_ERR(obj)) {
		pr_err("%s too large contiguous allocation was not rejected\n",
		       __func__);
		err = -EINVAL;
		goto out_close;
	}

	obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj)) {
		pr_err("%s largest possible contiguous allocation failed\n",
		       __func__);
		err = PTR_ERR(obj);
		goto out_close;
	}

out_close:
	close_objects(mem, &objects);
out_put:
	intel_memory_region_put(mem);
	return err;
}
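
/* Write @value into one dword of every page of @vma from the GPU */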
static int igt_gpu_write_dw(struct intel_context *ce,
			    struct i915_vma *vma,
			    u32 dword,
			    u32 value)
{
	return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
			       vma->size >> PAGE_SHIFT, value);
}
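
/* Readback check: every page must contain @val at dword index @dword */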
static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}
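
/*
 * igt_gpu_write() -- bounce around randomly selected engines, writing a
 * random dword of each page from the GPU and then verifying it from the
 * CPU, until the timeout expires.
 */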
static int igt_gpu_write(struct i915_gem_context *ctx,
			 struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int count;
	struct i915_vma *vma;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	n = 0;
	count = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		vm = ce->vm;
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_free;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_free;

	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	do {
		u32 rng = prandom_u32_state(&prng);
		u32 dword = offset_in_page(rng) / 4;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		err = igt_gpu_write_dw(ce, vma, dword, rng);
		if (err)
			break;

		err = igt_cpu_check(obj, dword, rng);
		if (err)
			break;
	} while (!__igt_timeout(end_time, NULL));
	i915_gem_context_unlock_engines(ctx);

out_free:
	kfree(order);

	if (err == -ENOMEM)
		err = 0;

	return err;
}
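
/* Minimal LMEM smoke test: create an object, pin its pages and release it */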
static int igt_lmem_create(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
	return err;
}
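
/* Fill a randomly sized LMEM object from the GPU, checking each write */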
static int igt_lmem_write_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct file *file;
	I915_RND_STATE(prng);
	u32 sz;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);

	obj = i915_gem_object_create_lmem(i915, sz, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_file;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	err = igt_gpu_write(ctx, obj);
	if (err)
		pr_err("igt_gpu_write failed(%d)\n", err);

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
out_file:
	fput(file);
	return err;
}
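
/* Select a random user engine of @class, e.g. one of the copy engines */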
static struct intel_engine_cs *
random_engine_class(struct drm_i915_private *i915,
		    unsigned int class,
		    struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	count = 0;
	for (engine = intel_engine_lookup_user(i915, class, 0);
	     engine && engine->uabi_class == class;
	     engine = rb_entry_safe(rb_next(&engine->uabi_node),
				    typeof(*engine), uabi_node))
		count++;

	count = i915_prandom_u32_max_state(count, prng);
	return intel_engine_lookup_user(i915, class, count);
}
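
/*
 * igt_lmem_write_cpu() -- blit a known pattern into LMEM, then overwrite it
 * through a WC mapping using randomised sizes, alignments and offsets,
 * spot-checking a sampled dword after each write.
 */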
static int igt_lmem_write_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	u32 bytes[] = {
		0, /* rng placeholder, filled in below */
		sizeof(u32),
		sizeof(u64),
		64, /* cl */
		PAGE_SIZE,
		PAGE_SIZE - sizeof(u32),
		PAGE_SIZE - sizeof(u64),
		PAGE_SIZE - 64,
	};
	struct intel_engine_cs *engine;
	u32 *vaddr;
	u32 sz;
	u32 i;
	int *order;
	int count;
	int err;

	engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
	if (!engine)
		return 0;

	pr_info("%s: using %s\n", __func__, engine->name);

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
	sz = max_t(u32, 2 * PAGE_SIZE, sz); /* needs to be at least 2 pages */

	obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	/* Put the pages into a known state -- from the gpu for added fun */
	intel_engine_pm_get(engine);
	err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
	intel_engine_pm_put(engine);
	if (err)
		goto out_unpin;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_wc_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_unpin;

	count = ARRAY_SIZE(bytes);
	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_unpin;
	}

	/* A random multiple of u32, picked between [64, PAGE_SIZE - 64] */
	bytes[0] = igt_random_offset(&prng, 64, PAGE_SIZE - 64, 0, sizeof(u32));
	GEM_BUG_ON(!IS_ALIGNED(bytes[0], sizeof(u32)));

	i = 0;
	do {
		u32 offset, align, dword, size, val;

		size = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));

		offset = igt_random_offset(&prng, 0, obj->base.size,
					   size, align);

		val = prandom_u32_state(&prng);
		memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
			 size / sizeof(u32));

		/*
		 * Sample random dw -- don't waste precious time reading every
		 * dw.
		 */
		dword = igt_random_offset(&prng, offset, offset + size,
					  sizeof(u32), sizeof(u32));
		dword /= sizeof(u32);
		if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
			pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
			       __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
			       size, align, offset);
			err = -EINVAL;
			break;
		}
	} while (!__igt_timeout(end_time, NULL));

	kfree(order);
out_unpin:
	i915_gem_object_unpin_map(obj);
out_put:
	i915_gem_object_put(obj);
	return err;
}
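
/* Human-readable name for an i915_map_type, for the perf logs below */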
static const char *repr_type(u32 type)
{
	switch (type) {
	case I915_MAP_WB:
		return "WB";
	case I915_MAP_WC:
		return "WC";
	}

	return "";
}
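
/*
 * Allocate an object from @mr and return it pinned and mapped with @type;
 * regions that cannot be CPU-mapped are reported as -ENODEV so callers can
 * skip them.
 */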
static struct drm_i915_gem_object *
create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
			  void **out_addr)
{
	struct drm_i915_gem_object *obj;
	void *addr;

	obj = i915_gem_object_create_region(mr, size, 0);
	if (IS_ERR(obj)) {
		if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
			return ERR_PTR(-ENODEV);
		return obj;
	}

	addr = i915_gem_object_pin_map(obj, type);
	if (IS_ERR(addr)) {
		i915_gem_object_put(obj);
		if (PTR_ERR(addr) == -ENXIO)
			return ERR_PTR(-ENODEV);
		return addr;
	}

	*out_addr = addr;
	return obj;
}
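
/* sort() comparator for the ktime_t samples gathered in _perf_memcpy() */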
static int wrap_ktime_compare(const void *A, const void *B)
{
	const ktime_t *a = A, *b = B;

	return ktime_compare(*a, *b);
}
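
/*
 * The copy strategies under test: plain memcpy, a simple word-at-a-time
 * loop, and i915_memcpy_from_wc() which can accelerate reads from WC memory
 * (skipped below when the platform lacks support).
 */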
static void igt_memcpy_long(void *dst, const void *src, size_t size)
{
	unsigned long *tmp = dst;
	const unsigned long *s = src;

	size = size / sizeof(unsigned long);
	while (size--)
		*tmp++ = *s++;
}

static inline void igt_memcpy(void *dst, const void *src, size_t size)
{
	memcpy(dst, src, size);
}

static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
{
	i915_memcpy_from_wc(dst, src, size);
}
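
/*
 * Time each copy strategy over the src/dst mapping-type combination. The
 * reported bandwidth uses the middle three of five sorted passes, weighting
 * the median double: 4 * size * NSEC_PER_SEC / (t[1] + 2 * t[2] + t[3]).
 */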
static int _perf_memcpy(struct intel_memory_region *src_mr,
			struct intel_memory_region *dst_mr,
			u64 size, u32 src_type, u32 dst_type)
{
	struct drm_i915_private *i915 = src_mr->i915;
	const struct {
		const char *name;
		void (*copy)(void *dst, const void *src, size_t size);
		bool skip;
	} tests[] = {
		{ "memcpy", igt_memcpy },
		{ "memcpy_long", igt_memcpy_long },
		{ "memcpy_from_wc", igt_memcpy_from_wc,
		  !i915_has_memcpy_from_wc() },
	};
	struct drm_i915_gem_object *src, *dst;
	void *src_addr, *dst_addr;
	int ret = 0;
	int i;

	src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
	if (IS_ERR(src)) {
		ret = PTR_ERR(src);
		goto out;
	}

	dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
	if (IS_ERR(dst)) {
		ret = PTR_ERR(dst);
		goto out_unpin_src;
	}

	for (i = 0; i < ARRAY_SIZE(tests); ++i) {
		ktime_t t[5];
		int pass;

		if (tests[i].skip)
			continue;

		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
			ktime_t t0, t1;

			t0 = ktime_get();
			tests[i].copy(dst_addr, src_addr, size);
			t1 = ktime_get();

			t[pass] = ktime_sub(t1, t0);
		}

		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
		pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
			__func__,
			src_mr->name, repr_type(src_type),
			dst_mr->name, repr_type(dst_type),
			tests[i].name, div64_u64(size, SZ_1K),
			div64_u64(mul_u32_u32(4 * size,
					      1000 * 1000 * 1000),
				  t[1] + 2 * t[2] + t[3]) >> 20);

		cond_resched();
	}

	i915_gem_object_unpin_map(dst);
	i915_gem_object_put(dst);
out_unpin_src:
	i915_gem_object_unpin_map(src);
	i915_gem_object_put(src);

	i915_gem_drain_freed_objects(i915);
out:
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}
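
/* Sweep every src/dst region, mapping type and transfer size combination */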
static int perf_memcpy(void *arg)
{
	struct drm_i915_private *i915 = arg;
	static const u32 types[] = { I915_MAP_WB, I915_MAP_WC };
	static const u32 sizes[] = { SZ_4K, SZ_64K, SZ_4M };
	struct intel_memory_region *src_mr, *dst_mr;
	int src_id, dst_id;
	int i, j, k;
	int ret;

	for_each_memory_region(src_mr, i915, src_id) {
		for_each_memory_region(dst_mr, i915, dst_id) {
			for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
				for (j = 0; j < ARRAY_SIZE(types); ++j) {
					for (k = 0; k < ARRAY_SIZE(types); ++k) {
						ret = _perf_memcpy(src_mr,
								   dst_mr,
								   sizes[i],
								   types[j],
								   types[k]);
						if (ret)
							return ret;
					}
				}
			}
		}
	}

	return 0;
}
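
/* Entry point for the mock selftests, run against a mock device + region */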
int intel_memory_region_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_mock_contiguous),
		SUBTEST(igt_mock_splintered_region),
	};
	struct intel_memory_region *mem;
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_unref;
	}

	err = i915_subtests(tests, mem);

	intel_memory_region_put(mem);
out_unref:
	mock_destroy_device(i915);
	return err;
}

int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_lmem_create),
		SUBTEST(igt_lmem_write_cpu),
		SUBTEST(igt_lmem_write_gpu),
	};

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}

int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(perf_memcpy),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}
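
/*
 * Usage note, assuming a CONFIG_DRM_I915_SELFTEST build: the mock group is
 * typically run at module load with i915.mock_selftests=-1, while the live
 * and perf groups are selected with i915.live_selftests=-1 and
 * i915.perf_selftests=-1 respectively.
 */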