/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright © 2018 Intel Corporation
 */

#include "i915_selftest.h"
#include "selftest_engine.h"
#include "selftest_engine_heartbeat.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_spinner.h"

static int live_engine_busy_stats(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct igt_spinner spin;
	int err = 0;

	/*
	 * Check that if an engine supports busy-stats, they tell the truth.
	 */

	if (igt_spinner_init(&spin, gt))
		return -ENOMEM;

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	for_each_engine(engine, gt, id) {
		struct i915_request *rq;
		ktime_t de, dt;
		ktime_t t[2];

		if (!intel_engine_supports_stats(engine))
			continue;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (intel_gt_pm_wait_for_idle(gt)) {
			err = -EBUSY;
			break;
		}
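
		/*
		 * Quiesce the engine: stop the heartbeat so its background
		 * requests do not pollute the idle busyness sample.
		 */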
		st_engine_heartbeat_disable(engine);

		ENGINE_TRACE(engine, "measuring idle time\n");
		preempt_disable();
		de = intel_engine_get_busy_time(engine, &t[0]);
		udelay(100);
		de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
		preempt_enable();
		dt = ktime_sub(t[1], t[0]);
		if (de < 0 || de > 10) {
			pr_err("%s: reported %lldns [%d%%] busyness while sleeping [for %lldns]\n",
			       engine->name,
			       de, (int)div64_u64(100 * de, dt), dt);
			GEM_TRACE_DUMP();
			err = -EINVAL;
			goto end;
		}
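
		/* 100% busy: keep the engine saturated with a spinning request. */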
		rq = igt_spinner_create_request(&spin,
						engine->kernel_context,
						MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto end;
		}
		i915_request_add(rq);

		if (!igt_wait_for_spinner(&spin, rq)) {
			intel_gt_set_wedged(engine->gt);
			err = -ETIME;
			goto end;
		}
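
		/* With the spinner executing, sampled busyness should be ~100%. */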
		ENGINE_TRACE(engine, "measuring busy time\n");
		preempt_disable();
		de = intel_engine_get_busy_time(engine, &t[0]);
		udelay(100);
		de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
		preempt_enable();
		dt = ktime_sub(t[1], t[0]);
		if (100 * de < 95 * dt || 95 * de > 100 * dt) {
			pr_err("%s: reported %lldns [%d%%] busyness while spinning [for %lldns]\n",
			       engine->name,
			       de, (int)div64_u64(100 * de, dt), dt);
			GEM_TRACE_DUMP();
			err = -EINVAL;
			goto end;
		}
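
		/* Common exit: restart the heartbeat and release the spinner. */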
end:
		st_engine_heartbeat_enable(engine);
		igt_spinner_end(&spin);
		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			break;
	}

	igt_spinner_fini(&spin);
	if (igt_flush_test(gt->i915))
		err = -EIO;
	return err;
}

static int live_engine_pm(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Check we can call intel_engine_pm_put from any context. No
	 * failures are reported directly, but if we mess up lockdep should
	 * tell us.
	 */
	if (intel_gt_pm_wait_for_idle(gt)) {
		pr_err("Unable to flush GT pm before test\n");
		return -EBUSY;
	}
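
	/* Everything should start parked before we touch any wakerefs. */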

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	for_each_engine(engine, gt, id) {
		const typeof(*igt_atomic_phases) *p;

		for (p = igt_atomic_phases; p->name; p++) {
			/*
			 * Acquisition is always synchronous, except if we
			 * know that the engine is already awake, in which
			 * case we should use intel_engine_pm_get_if_awake()
			 * to atomically grab the wakeref.
			 *
			 * In practice,
			 *    intel_engine_pm_get();
			 *    intel_engine_pm_put();
			 * occurs in one thread, while simultaneously
			 *    intel_engine_pm_get_if_awake();
			 *    intel_engine_pm_put();
			 * occurs from atomic context in another.
			 */
			GEM_BUG_ON(intel_engine_pm_is_awake(engine));
			intel_engine_pm_get(engine);

			p->critical_section_begin();
			if (!intel_engine_pm_get_if_awake(engine))
				pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
				       engine->name, p->name);
			else
				intel_engine_pm_put_async(engine);
			intel_engine_pm_put_async(engine);
			p->critical_section_end();

			intel_engine_pm_flush(engine);
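
			/* After flushing the async put, the engine must have parked. */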
			if (intel_engine_pm_is_awake(engine)) {
				pr_err("%s is still awake after flushing pm\n",
				       engine->name);
				return -EINVAL;
			}

			/* gt wakeref is async (deferred to workqueue) */
			if (intel_gt_pm_wait_for_idle(gt)) {
				pr_err("GT failed to idle\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

int live_engine_pm_selftests(struct intel_gt *gt)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_engine_busy_stats),
		SUBTEST(live_engine_pm),
	};

	return intel_gt_live_subtests(tests, gt);
}