/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 */
#include "i915_drv.h"
#include "intel_renderstate.h"
#include "gt/intel_context.h"
#include "intel_ring.h"
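
/*
 * The "null" render state is a pre-generated batch + state blob, one per
 * gen (gen6_null_state etc.), built into the driver as read-only tables
 * by the null state generator. Only the render engine has one; pick the
 * table matching this engine's gen, if any.
 */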
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
	if (engine->class != RENDER_CLASS)
		return NULL;

	switch (INTEL_GEN(engine->i915)) {
	case 6:
		return &gen6_null_state;
	case 7:
		return &gen7_null_state;
	case 8:
		return &gen8_null_state;
	case 9:
		return &gen9_null_state;
	}

	return NULL;
}
/*
 * Macro to add commands to the auxiliary batch.
 * This macro only checks for page overflow before inserting the commands;
 * this is sufficient as the null state generator makes the final batch
 * with two passes to build command and state separately. At this point
 * the sizes of both are known and it compacts them by relocating the state
 * right after the commands, taking care of alignment, so we should have
 * sufficient space below them for adding new commands.
 */
#define OUT_BATCH(batch, i, val)				\
	do {							\
		if ((i) >= PAGE_SIZE / sizeof(u32))		\
			goto out;				\
		(batch)[(i)++] = (val);				\
	} while (0)
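
/*
 * Copy the generated null-state batch into the pinned vma, resolving each
 * entry of rodata->reloc against the vma's GGTT address as we go, then
 * append the auxiliary batch after it: cacheline-aligned NOP padding,
 * optional pooled-EU setup, and a terminating MI_BATCH_BUFFER_END.
 */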
static int render_state_setup(struct intel_renderstate *so,
			      struct drm_i915_private *i915)
{
	const struct intel_renderstate_rodata *rodata = so->rodata;
	unsigned int i = 0, reloc_index = 0;
	int ret = -EINVAL;
	u32 *d;

	d = i915_gem_object_pin_map(so->vma->obj, I915_MAP_WB);
	if (IS_ERR(d))
		return PTR_ERR(d);
	while (i < rodata->batch_items) {
		u32 s = rodata->batch[i];

		if (i * 4 == rodata->reloc[reloc_index]) {
			u64 r = s + so->vma->node.start;

			s = lower_32_bits(r);
			if (HAS_64BIT_RELOC(i915)) {
				/*
				 * A 64b relocation consumes two dwords;
				 * the high dword must be 0 in the template.
				 */
				if (i + 1 >= rodata->batch_items ||
				    rodata->batch[i + 1] != 0)
					goto out;

				d[i++] = s;
				s = upper_32_bits(r);
			}

			reloc_index++;
		}

		d[i++] = s;
	}
	if (rodata->reloc[reloc_index] != -1) {
		drm_err(&i915->drm, "only %d relocs resolved\n", reloc_index);
		goto out;
	}
	so->batch_offset = i915_ggtt_offset(so->vma);
	so->batch_size = rodata->batch_items * sizeof(u32);

	while (i % CACHELINE_DWORDS)
		OUT_BATCH(d, i, MI_NOOP);

	so->aux_offset = i * sizeof(u32);
	if (HAS_POOLED_EU(i915)) {
		/*
		 * We always program 3x6 pool config but depending upon which
		 * subslice is disabled HW drops down to appropriate config
		 * shown below.
		 *
		 * In the below table 2x6 config always refers to the
		 * fused-down version, native 2x6 is not available and can
		 * be ignored
		 *
		 * SNo  subslices config                eu pool configuration
		 * -----------------------------------------------------------
		 * 1    3 subslices enabled (3x6)  -    0x00777000  (9+9)
		 * 2    ss0 disabled (2x6)         -    0x00777000  (3+9)
		 * 3    ss1 disabled (2x6)         -    0x00770000  (6+6)
		 * 4    ss2 disabled (2x6)         -    0x00007000  (9+3)
		 */
		u32 eu_pool_config = 0x00777000;
		OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE);
		OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE);
		OUT_BATCH(d, i, eu_pool_config);
		OUT_BATCH(d, i, 0);
		OUT_BATCH(d, i, 0);
		OUT_BATCH(d, i, 0);
	}
	OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
	so->aux_size = i * sizeof(u32) - so->aux_offset;
	so->aux_offset += so->batch_offset;
	/*
	 * Since we are sending length, we need to strictly conform to
	 * all requirements. For Gen2 this must be a multiple of 8.
	 */
	so->aux_size = ALIGN(so->aux_size, 8);
	ret = 0;
out:
	__i915_gem_object_flush_map(so->vma->obj, 0, i * sizeof(u32));
	__i915_gem_object_release_map(so->vma->obj);
	return ret;
}

#undef OUT_BATCH
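
/*
 * Allocate a page for the null-state batch, bind it high in the global
 * GTT and fill it via render_state_setup(). Everything is pinned and
 * locked under so->ww so the batch cannot move until the caller has
 * emitted it and called intel_renderstate_fini(); on -EDEADLK the ww
 * transaction backs off and the whole sequence is retried.
 */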
int intel_renderstate_init(struct intel_renderstate *so,
			   struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct drm_i915_gem_object *obj = NULL;
	int err;

	memset(so, 0, sizeof(*so));

	so->rodata = render_state_get_rodata(engine);
	if (so->rodata) {
		if (so->rodata->batch_items * 4 > PAGE_SIZE)
			return -EINVAL;

		obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		so->vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(so->vma)) {
			err = PTR_ERR(so->vma);
			goto err_obj;
		}
	}

	i915_gem_ww_ctx_init(&so->ww, true);
retry:
	err = intel_context_pin_ww(ce, &so->ww);
	if (err)
		goto err_fini;

	/* return early if there's nothing to setup */
	if (!err && !so->rodata)
		return 0;

	err = i915_gem_object_lock(so->vma->obj, &so->ww);
	if (err)
		goto err_context;

	err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto err_context;

	err = render_state_setup(so, engine->i915);
	if (err)
		goto err_unpin;

	return 0;

err_unpin:
	i915_vma_unpin(so->vma);
err_context:
	intel_context_unpin(ce);
err_fini:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&so->ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&so->ww);
err_obj:
	if (obj)
		i915_gem_object_put(obj);
	so->vma = NULL;
	return err;
}
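
/*
 * Emit the null-state batch, and the auxiliary batch that follows it
 * when one was built, into @rq, first marking the vma active so it is
 * retained until the request completes.
 */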
int intel_renderstate_emit(struct intel_renderstate *so,
			   struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	int err;

	if (!so->vma)
		return 0;

	err = i915_request_await_object(rq, so->vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(so->vma, rq, 0);
	if (err)
		return err;

	err = engine->emit_bb_start(rq,
				    so->batch_offset, so->batch_size,
				    I915_DISPATCH_SECURE);
	if (err)
		return err;
	if (so->aux_size > 8) {
		err = engine->emit_bb_start(rq,
					    so->aux_offset, so->aux_size,
					    I915_DISPATCH_SECURE);
		if (err)
			return err;
	}

	return 0;
}
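
/*
 * Undo everything intel_renderstate_init() acquired: the GGTT binding,
 * the context pin and the ww transaction, and finally drop the batch
 * object itself.
 */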
void intel_renderstate_fini(struct intel_renderstate *so,
			    struct intel_context *ce)
{
	if (so->vma) {
		i915_vma_unpin(so->vma);
		i915_vma_close(so->vma);
	}

	intel_context_unpin(ce);
	i915_gem_ww_ctx_fini(&so->ww);

	if (so->vma)
		i915_gem_object_put(so->vma->obj);
}