/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 */
/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things in the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But what about the ringbuffer control registers (head, tail, etc.)?
 * Shouldn't we just need one set of those per engine command streamer? This
 * is where the name "Logical Rings" starts to make sense: by virtualizing
 * the rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use them. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object
 * and so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based,
 * method). This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is
 * not updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request
 * queue for the appropriate engine: this structure contains a copy of the
 * context's tail after the request was written to the ring buffer and a
 * pointer to the context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch
 * interrupt. During the interrupt handling, the driver examines the events in
 * the buffer: for each context complete event, if the announced ID matches
 * that on the head of the request queue, then that request is retired and
 * removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front
 * of the queue are next to be submitted but since a context may not occur
 * twice in an execution list, if subsequent requests have the same ID as the
 * first then the two requests must be combined. This is done simply by
 * discarding requests at the head of the queue until either only one request
 * is left (in which case we use a NULL second context) or the first two
 * requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single
 * context completes but a second context is still executing, the request for
 * this second context will be at the head of the queue when we remove the
 * first one. This request will then be resubmitted along with a new request
 * for a different context, which will cause the hardware to continue
 * executing the second request and queue the new request (the GPU detects
 * the condition of a context getting preempted with the same context and
 * optimizes the context switch flow by not doing preemption, but just
 * sampling the new tail pointer).
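 *
 * As an illustration only (a deliberately simplified sketch, not the
 * driver's actual code; see execlists_dequeue() below for the real thing),
 * the pairing rule above boils down to roughly:
 *
 *	port[0] = pop(queue);
 *	while (!empty(queue) && same_ctx(head(queue), port[0]))
 *		port[0] = pop(queue);	(merge: same context, newer tail)
 *	port[1] = empty(queue) ? NULL : pop(queue);
 *	write_elsp(port[1], port[0]);
 */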
134 #include <linux/interrupt.h>
136 #include <drm/drmP.h>
137 #include <drm/i915_drm.h>
138 #include "i915_drv.h"
139 #include "intel_mocs.h"
141 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
142 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
143 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
145 #define RING_EXECLIST_QFULL (1 << 0x2)
146 #define RING_EXECLIST1_VALID (1 << 0x3)
147 #define RING_EXECLIST0_VALID (1 << 0x4)
148 #define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
149 #define RING_EXECLIST1_ACTIVE (1 << 0x11)
150 #define RING_EXECLIST0_ACTIVE (1 << 0x12)
152 #define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
153 #define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
154 #define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
155 #define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
156 #define GEN8_CTX_STATUS_COMPLETE (1 << 4)
157 #define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
159 #define GEN8_CTX_STATUS_COMPLETED_MASK \
160 (GEN8_CTX_STATUS_ACTIVE_IDLE | \
161 GEN8_CTX_STATUS_PREEMPTED | \
162 GEN8_CTX_STATUS_ELEMENT_SWITCH)
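/* Dword offsets into the logical ring context image: the CTX_LRI_HEADER_*
 * entries mark the MI_LOAD_REGISTER_IMM headers, and the remaining CTX_*
 * entries index the (reg, value) pairs that follow them (register offset at
 * the index, value at index + 1; see ASSIGN_CTX_REG below).
 */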
164 #define CTX_LRI_HEADER_0 0x01
165 #define CTX_CONTEXT_CONTROL 0x02
166 #define CTX_RING_HEAD 0x04
167 #define CTX_RING_TAIL 0x06
168 #define CTX_RING_BUFFER_START 0x08
169 #define CTX_RING_BUFFER_CONTROL 0x0a
170 #define CTX_BB_HEAD_U 0x0c
171 #define CTX_BB_HEAD_L 0x0e
172 #define CTX_BB_STATE 0x10
173 #define CTX_SECOND_BB_HEAD_U 0x12
174 #define CTX_SECOND_BB_HEAD_L 0x14
175 #define CTX_SECOND_BB_STATE 0x16
176 #define CTX_BB_PER_CTX_PTR 0x18
177 #define CTX_RCS_INDIRECT_CTX 0x1a
178 #define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
179 #define CTX_LRI_HEADER_1 0x21
180 #define CTX_CTX_TIMESTAMP 0x22
181 #define CTX_PDP3_UDW 0x24
182 #define CTX_PDP3_LDW 0x26
183 #define CTX_PDP2_UDW 0x28
184 #define CTX_PDP2_LDW 0x2a
185 #define CTX_PDP1_UDW 0x2c
186 #define CTX_PDP1_LDW 0x2e
187 #define CTX_PDP0_UDW 0x30
188 #define CTX_PDP0_LDW 0x32
189 #define CTX_LRI_HEADER_2 0x41
190 #define CTX_R_PWR_CLK_STATE 0x42
191 #define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
193 #define GEN8_CTX_VALID (1<<0)
194 #define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
195 #define GEN8_CTX_FORCE_RESTORE (1<<2)
196 #define GEN8_CTX_L3LLC_COHERENT (1<<5)
197 #define GEN8_CTX_PRIVILEGE (1<<8)
199 #define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
200 (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
201 (reg_state)[(pos)+1] = (val); \
204 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
205 const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
206 reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
207 reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
210 #define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
211 reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
212 reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
enum {
	FAULT_AND_HANG = 0,
	FAULT_AND_HALT, /* Debug only */
	FAULT_AND_STREAM,
	FAULT_AND_CONTINUE /* Unsupported */
};
221 #define GEN8_CTX_ID_SHIFT 32
222 #define GEN8_CTX_ID_WIDTH 21
223 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
224 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
226 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
227 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
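/* Each request is padded with two MI_NOOPs so that a resubmission never sees
 * ring:HEAD == req:TAIL (WaIdleLiteRestore); see intel_logical_ring_advance().
 */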
229 #define WA_TAIL_DWORDS 2
231 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
232 struct intel_engine_cs *engine);
233 static int intel_lr_context_pin(struct i915_gem_context *ctx,
234 struct intel_engine_cs *engine);
235 static void execlists_init_reg_state(u32 *reg_state,
236 struct i915_gem_context *ctx,
237 struct intel_engine_cs *engine,
238 struct intel_ring *ring);
241 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
242 * @dev_priv: i915 device private
243 * @enable_execlists: value of i915.enable_execlists module parameter.
245 * Only certain platforms support Execlists (the prerequisites being
246 * support for Logical Ring Contexts and Aliasing PPGTT or better).
248 * Return: 1 if Execlists is supported and has to be enabled.
250 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
252 /* On platforms with execlist available, vGPU will only
253 * support execlist mode, no ring buffer mode.
255 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
258 if (INTEL_GEN(dev_priv) >= 9)
261 if (enable_execlists == 0)
264 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
265 USES_PPGTT(dev_priv) &&
266 i915.use_mmio_flip >= 0)
273 logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
275 struct drm_i915_private *dev_priv = engine->i915;
277 engine->disable_lite_restore_wa =
278 (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
279 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
280 (engine->id == VCS || engine->id == VCS2);
282 engine->ctx_desc_template = GEN8_CTX_VALID;
283 if (IS_GEN8(dev_priv))
284 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
285 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
287 /* TODO: WaDisableLiteRestore when we start using semaphore
288 * signalling between Command Streamers */
289 /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
291 /* WaEnableForceRestoreInCtxtDescForVCS:skl */
292 /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
293 if (engine->disable_lite_restore_wa)
294 engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 * for a pinned context
300 * @ctx: Context to work on
301 * @engine: Engine the descriptor will be used with
303 * The context descriptor encodes various attributes of a context,
304 * including its GTT address and some flags. Because it's fairly
305 * expensive to calculate, we'll just do it once and cache the result,
306 * which remains valid until the context is unpinned.
308 * This is what a descriptor looks like, from LSB to MSB::
310 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
311 * bits 12-31: LRCA, GTT address of (the HWSP of) this context
312 * bits 32-52: ctx ID, a globally unique tag
313 * bits 53-54: mbz, reserved for use by hardware
314 * bits 55-63: group ID, currently unused and set to 0
317 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
318 struct intel_engine_cs *engine)
320 struct intel_context *ce = &ctx->engine[engine->id];
323 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
325 desc = ctx->desc_template; /* bits 3-4 */
326 desc |= engine->ctx_desc_template; /* bits 0-11 */
327 desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE;
329 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
334 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
335 struct intel_engine_cs *engine)
337 return ctx->engine[engine->id].lrc_desc;
341 execlists_context_status_change(struct drm_i915_gem_request *rq,
342 unsigned long status)
 * Only used when GVT-g is enabled now. When GVT-g is disabled,
 * the compiler should eliminate this function as dead-code.
348 if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
351 atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
355 execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
357 ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
358 ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
359 ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
360 ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
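/* Bring the context image up to date for this request: write the new ring
 * tail and, for 32b PPGTT, refresh the PDP registers, then return the cached
 * context descriptor that will be written to the ELSP.
 */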
363 static u64 execlists_update_context(struct drm_i915_gem_request *rq)
365 struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
366 struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
367 u32 *reg_state = ce->lrc_reg_state;
369 reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
/* True 32b PPGTT with dynamic page allocation: update PDP
 * registers and point the unallocated PDPs to the scratch page.
 * PML4 is allocated during ppgtt init, so this is not needed
 * in 48-bit mode.
 */
377 if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
378 execlists_update_context_pdps(ppgtt, reg_state);
/* Make sure the context image is complete before we submit it to HW.
 *
 * Ostensibly, writes (including the WCB) should be flushed prior to
 * an uncached write such as our mmio register access; however, the
 * empirical evidence (esp. on Braswell) suggests that the WC write
 * into memory may not be visible to the HW prior to the completion
 * of the UC register write and that we may begin execution from the
 * context before its image is complete, leading to invalid PD chasing.
 *
 * Furthermore, Braswell, at least, wants a full mb to be sure that
 * the writes are coherent in memory (visible to the GPU) prior to
 * execution, and not just visible to other CPUs (as is the result of
 * wmb).
 */
mb();

return ce->lrc_desc;
}
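/* Write the two pending context descriptors to the ExecList Submit Port.
 * Both elements are always written, second (possibly empty) element first;
 * the write of the lower dword of element 0 triggers the actual submission.
 */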
399 static void execlists_submit_ports(struct intel_engine_cs *engine)
401 struct drm_i915_private *dev_priv = engine->i915;
402 struct execlist_port *port = engine->execlist_port;
u32 __iomem *elsp =
	dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
u64 desc[2];
408 execlists_context_status_change(port[0].request,
409 INTEL_CONTEXT_SCHEDULE_IN);
410 desc[0] = execlists_update_context(port[0].request);
411 engine->preempt_wa = port[0].count++; /* bdw only? fixed on skl? */
413 if (port[1].request) {
414 GEM_BUG_ON(port[1].count);
415 execlists_context_status_change(port[1].request,
416 INTEL_CONTEXT_SCHEDULE_IN);
417 desc[1] = execlists_update_context(port[1].request);
422 GEM_BUG_ON(desc[0] == desc[1]);
/* You must always write both descriptors in the order below. */
425 writel(upper_32_bits(desc[1]), elsp);
426 writel(lower_32_bits(desc[1]), elsp);
428 writel(upper_32_bits(desc[0]), elsp);
429 /* The context is automatically loaded after the following */
430 writel(lower_32_bits(desc[0]), elsp);
433 static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
435 return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
436 ctx->execlists_force_single_submission);
439 static bool can_merge_ctx(const struct i915_gem_context *prev,
440 const struct i915_gem_context *next)
445 if (ctx_single_port_submission(prev))
451 static void execlists_dequeue(struct intel_engine_cs *engine)
453 struct drm_i915_gem_request *cursor, *last;
454 struct execlist_port *port = engine->execlist_port;
457 last = port->request;
if (last)
	/* WaIdleLiteRestore:bdw,skl
	 * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
	 * as we resubmit the request. See gen8_emit_request()
	 * for where we prepare the padding after the end of the
	 * request.
	 */
	last->tail = last->wa_tail;
467 GEM_BUG_ON(port[1].request);
469 /* Hardware submission is through 2 ports. Conceptually each port
470 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
471 * static for a context, and unique to each, so we only execute
472 * requests belonging to a single context from each ring. RING_HEAD
473 * is maintained by the CS in the context image, it marks the place
474 * where it got up to last time, and through RING_TAIL we tell the CS
475 * where we want to execute up to this time.
477 * In this list the requests are in order of execution. Consecutive
478 * requests from the same context are adjacent in the ringbuffer. We
479 * can combine these requests into a single RING_TAIL update:
481 * RING_HEAD...req1...req2
483 * since to execute req2 the CS must first execute req1.
485 * Our goal then is to point each port to the end of a consecutive
486 * sequence of requests as being the most optimal (fewest wake ups
487 * and context switches) submission.
490 spin_lock(&engine->execlist_lock);
491 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) {
/* Can we combine this request with the current port? It has to
 * be the same context/ringbuffer and not have any exceptions
 * (e.g. GVT saying never to combine contexts).
 *
 * If we can combine the requests, we can execute both by
 * updating the RING_TAIL to point to the end of the second
 * request, and so we never need to tell the hardware about
 * the first.
 */
501 if (last && !can_merge_ctx(cursor->ctx, last->ctx)) {
502 /* If we are on the second port and cannot combine
503 * this request with the last, then we are done.
505 if (port != engine->execlist_port)
/* If GVT overrides us we only ever submit port[0],
 * leaving port[1] empty. Note that we also have
 * to be careful that we don't queue the same
 * context (even though a different request) to
 * the second port.
 */
if (ctx_single_port_submission(cursor->ctx))
	break;

GEM_BUG_ON(last->ctx == cursor->ctx);
519 i915_gem_request_assign(&port->request, last);
526 /* Decouple all the requests submitted from the queue */
527 engine->execlist_queue.next = &cursor->execlist_link;
528 cursor->execlist_link.prev = &engine->execlist_queue;
530 i915_gem_request_assign(&port->request, last);
532 spin_unlock(&engine->execlist_lock);
535 execlists_submit_ports(engine);
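/* Nothing is in flight on the ELSP when even the first port is empty. */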
538 static bool execlists_elsp_idle(struct intel_engine_cs *engine)
540 return !engine->execlist_port[0].request;
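/* Normally we may submit as soon as one port is free, but with the
 * lite-restore or preemption workarounds active we must wait for the GPU to
 * drain both ports before writing a new execlist.
 */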
543 static bool execlists_elsp_ready(struct intel_engine_cs *engine)
547 port = 1; /* wait for a free slot */
548 if (engine->disable_lite_restore_wa || engine->preempt_wa)
549 port = 0; /* wait for GPU to be idle before continuing */
551 return !engine->execlist_port[port].request;
555 * Check the unread Context Status Buffers and manage the submission of new
556 * contexts to the ELSP accordingly.
558 static void intel_lrc_irq_handler(unsigned long data)
560 struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
561 struct execlist_port *port = engine->execlist_port;
562 struct drm_i915_private *dev_priv = engine->i915;
564 intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
566 if (!execlists_elsp_idle(engine)) {
567 u32 __iomem *csb_mmio =
568 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
u32 __iomem *buf =
	dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
571 unsigned int csb, head, tail;
573 csb = readl(csb_mmio);
574 head = GEN8_CSB_READ_PTR(csb);
575 tail = GEN8_CSB_WRITE_PTR(csb);
if (tail < head)
	tail += GEN8_CSB_ENTRIES;
578 while (head < tail) {
579 unsigned int idx = ++head % GEN8_CSB_ENTRIES;
580 unsigned int status = readl(buf + 2 * idx);
582 if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
585 GEM_BUG_ON(port[0].count == 0);
586 if (--port[0].count == 0) {
587 GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
588 execlists_context_status_change(port[0].request,
589 INTEL_CONTEXT_SCHEDULE_OUT);
591 i915_gem_request_put(port[0].request);
593 memset(&port[1], 0, sizeof(port[1]));
595 engine->preempt_wa = false;
598 GEM_BUG_ON(port[0].count == 0 &&
599 !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
602 writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
603 GEN8_CSB_WRITE_PTR(csb) << 8),
607 if (execlists_elsp_ready(engine))
608 execlists_dequeue(engine);
610 intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
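/* Request submission entry point for execlists: queue the request on the
 * engine and, if the ELSP is currently idle, kick the irq tasklet so it is
 * dequeued and submitted immediately.
 */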
613 static void execlists_submit_request(struct drm_i915_gem_request *request)
615 struct intel_engine_cs *engine = request->engine;
618 spin_lock_irqsave(&engine->execlist_lock, flags);
620 list_add_tail(&request->execlist_link, &engine->execlist_queue);
621 if (execlists_elsp_idle(engine))
622 tasklet_hi_schedule(&engine->irq_tasklet);
624 spin_unlock_irqrestore(&engine->execlist_lock, flags);
627 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
629 struct intel_engine_cs *engine = request->engine;
630 struct intel_context *ce = &request->ctx->engine[engine->id];
633 /* Flush enough space to reduce the likelihood of waiting after
634 * we start building the request - in which case we will just
635 * have to repeat work.
637 request->reserved_space += EXECLISTS_REQUEST_SIZE;
640 ret = execlists_context_deferred_alloc(request->ctx, engine);
645 request->ring = ce->ring;
647 ret = intel_lr_context_pin(request->ctx, engine);
651 if (i915.enable_guc_submission) {
653 * Check that the GuC has space for the request before
654 * going any further, as the i915_add_request() call
655 * later on mustn't fail ...
657 ret = i915_guc_wq_reserve(request);
662 ret = intel_ring_begin(request, 0);
666 if (!ce->initialised) {
667 ret = engine->init_context(request);
671 ce->initialised = true;
674 /* Note that after this point, we have committed to using
675 * this request as it is being used to both track the
676 * state of engine initialisation and liveness of the
677 * golden renderstate above. Think twice before you try
678 * to cancel/unwind this request now.
681 request->reserved_space -= EXECLISTS_REQUEST_SIZE;
685 if (i915.enable_guc_submission)
686 i915_guc_wq_unreserve(request);
688 intel_lr_context_unpin(request->ctx, engine);
693 * intel_logical_ring_advance() - advance the tail and prepare for submission
694 * @request: Request to advance the logical ringbuffer of.
696 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
697 * really happens during submission is that the context and current tail will be placed
698 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
699 * point, the tail *inside* the context is updated and the ELSP written to.
702 intel_logical_ring_advance(struct drm_i915_gem_request *request)
704 struct intel_ring *ring = request->ring;
705 struct intel_engine_cs *engine = request->engine;
707 intel_ring_advance(ring);
708 request->tail = ring->tail;
711 * Here we add two extra NOOPs as padding to avoid
712 * lite restore of a context with HEAD==TAIL.
714 * Caller must reserve WA_TAIL_DWORDS for us!
716 intel_ring_emit(ring, MI_NOOP);
717 intel_ring_emit(ring, MI_NOOP);
718 intel_ring_advance(ring);
719 request->wa_tail = ring->tail;
/* We keep the previous context alive until we retire the following
 * request. This ensures that the context object is still pinned
 * for any residual writes the HW makes into it on the context switch
 * into the next object following the breadcrumb. Otherwise, we may
 * retire the context too early.
 */
727 request->previous_context = engine->last_context;
728 engine->last_context = request->ctx;
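/* Pin the context backing object and its ringbuffer into the GGTT, map the
 * register state page and cache the context descriptor. The pin is held
 * until the matching intel_lr_context_unpin().
 */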
732 static int intel_lr_context_pin(struct i915_gem_context *ctx,
733 struct intel_engine_cs *engine)
735 struct intel_context *ce = &ctx->engine[engine->id];
739 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
744 ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN,
745 PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL);
749 vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
751 ret = PTR_ERR(vaddr);
755 ret = intel_ring_pin(ce->ring);
759 intel_lr_context_descriptor_update(ctx, engine);
761 ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
762 ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
763 i915_ggtt_offset(ce->ring->vma);
765 ce->state->obj->dirty = true;
767 /* Invalidate GuC TLB. */
768 if (i915.enable_guc_submission) {
769 struct drm_i915_private *dev_priv = ctx->i915;
770 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
773 i915_gem_context_get(ctx);
777 i915_gem_object_unpin_map(ce->state->obj);
779 __i915_vma_unpin(ce->state);
785 void intel_lr_context_unpin(struct i915_gem_context *ctx,
786 struct intel_engine_cs *engine)
788 struct intel_context *ce = &ctx->engine[engine->id];
790 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
791 GEM_BUG_ON(ce->pin_count == 0);
796 intel_ring_unpin(ce->ring);
798 i915_gem_object_unpin_map(ce->state->obj);
799 i915_vma_unpin(ce->state);
801 i915_gem_context_put(ctx);
804 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
807 struct intel_ring *ring = req->ring;
808 struct i915_workarounds *w = &req->i915->workarounds;
813 ret = req->engine->emit_flush(req, EMIT_BARRIER);
817 ret = intel_ring_begin(req, w->count * 2 + 2);
821 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
822 for (i = 0; i < w->count; i++) {
823 intel_ring_emit_reg(ring, w->reg[i].addr);
824 intel_ring_emit(ring, w->reg[i].value);
826 intel_ring_emit(ring, MI_NOOP);
828 intel_ring_advance(ring);
830 ret = req->engine->emit_flush(req, EMIT_BARRIER);
837 #define wa_ctx_emit(batch, index, cmd) \
839 int __index = (index)++; \
840 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
843 batch[__index] = (cmd); \
846 #define wa_ctx_emit_reg(batch, index, reg) \
847 wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
/*
 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
 * PIPE_CONTROL instruction. This is required for the flush to happen
 * correctly, but there is a slight complication as this is applied in the WA
 * batch where the values are only initialized once, so we cannot take the
 * register value at the beginning and reuse it further; hence we save its
 * value to memory, upload a constant value with bit21 set and then restore
 * it back with the saved value. To simplify the WA, a constant value is
 * formed by using the default value of this register. This shouldn't be a
 * problem because we are only modifying it for a short period and this batch
 * is non-preemptible. We could of course use additional instructions that
 * read the actual value of the register at that time and set our bit of
 * interest, but that would make the WA more complicated.
 *
 * This WA is also required for Gen9 so extracting it as a function avoids
 * code duplication.
 */
865 static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
869 struct drm_i915_private *dev_priv = engine->i915;
870 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
873 * WaDisableLSQCROPERFforOCL:skl,kbl
874 * This WA is implemented in skl_init_clock_gating() but since
875 * this batch updates GEN8_L3SQCREG4 with default value we need to
876 * set this bit here to retain the WA during flush.
878 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
879 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
881 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
882 MI_SRM_LRM_GLOBAL_GTT));
883 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
884 wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
885 wa_ctx_emit(batch, index, 0);
887 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
888 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
889 wa_ctx_emit(batch, index, l3sqc4_flush);
891 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
892 wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
893 PIPE_CONTROL_DC_FLUSH_ENABLE));
894 wa_ctx_emit(batch, index, 0);
895 wa_ctx_emit(batch, index, 0);
896 wa_ctx_emit(batch, index, 0);
897 wa_ctx_emit(batch, index, 0);
899 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
900 MI_SRM_LRM_GLOBAL_GTT));
901 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
902 wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
903 wa_ctx_emit(batch, index, 0);
908 static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
910 uint32_t start_alignment)
912 return wa_ctx->offset = ALIGN(offset, start_alignment);
915 static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
917 uint32_t size_alignment)
919 wa_ctx->size = offset - wa_ctx->offset;
921 WARN(wa_ctx->size % size_alignment,
922 "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
923 wa_ctx->size, size_alignment);
/*
 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
 * initialized at the beginning and shared across all contexts but this field
 * helps us to have multiple batches at different offsets and select them
 * based on a criteria. At the moment this batch always starts at the
 * beginning of the page and at this point we don't have multiple wa_ctx
 * batch buffers.
 *
 * The number of WAs applied is not known at the beginning; we use this field
 * to return the number of DWORDS written.
 *
 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
 * so it adds NOOPs as padding to make it cacheline aligned.
 * MI_BATCH_BUFFER_END will be added to the perctx batch and both of them
 * together make a complete batch buffer.
 */
942 static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
943 struct i915_wa_ctx_bb *wa_ctx,
947 uint32_t scratch_addr;
948 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
950 /* WaDisableCtxRestoreArbitration:bdw,chv */
951 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
953 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
954 if (IS_BROADWELL(engine->i915)) {
955 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
961 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
962 /* Actual scratch location is at 128 bytes offset */
963 scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
965 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
966 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
967 PIPE_CONTROL_GLOBAL_GTT_IVB |
968 PIPE_CONTROL_CS_STALL |
969 PIPE_CONTROL_QW_WRITE));
970 wa_ctx_emit(batch, index, scratch_addr);
971 wa_ctx_emit(batch, index, 0);
972 wa_ctx_emit(batch, index, 0);
973 wa_ctx_emit(batch, index, 0);
975 /* Pad to end of cacheline */
976 while (index % CACHELINE_DWORDS)
977 wa_ctx_emit(batch, index, MI_NOOP);
980 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
981 * execution depends on the length specified in terms of cache lines
982 * in the register CTX_RCS_INDIRECT_CTX
985 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
989 * This batch is started immediately after indirect_ctx batch. Since we ensure
990 * that indirect_ctx ends on a cacheline this batch is aligned automatically.
 * The number of DWORDS written is returned using this field.
994 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
995 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
997 static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
998 struct i915_wa_ctx_bb *wa_ctx,
1002 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1004 /* WaDisableCtxRestoreArbitration:bdw,chv */
1005 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1007 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1009 return wa_ctx_end(wa_ctx, *offset = index, 1);
1012 static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1013 struct i915_wa_ctx_bb *wa_ctx,
1018 struct drm_i915_private *dev_priv = engine->i915;
1019 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1021 i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
1023 /* WaDisableCtxRestoreArbitration:skl,bxt */
1024 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
1025 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
1026 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1028 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
1029 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1034 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl */
1035 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1036 wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2);
1037 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(
1038 GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
1039 wa_ctx_emit(batch, index, MI_NOOP);
1041 /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
1042 /* Actual scratch location is at 128 bytes offset */
1043 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1044 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1045 PIPE_CONTROL_GLOBAL_GTT_IVB |
1046 PIPE_CONTROL_CS_STALL |
1047 PIPE_CONTROL_QW_WRITE));
1048 wa_ctx_emit(batch, index, scratch_addr);
1049 wa_ctx_emit(batch, index, 0);
1050 wa_ctx_emit(batch, index, 0);
1051 wa_ctx_emit(batch, index, 0);
1053 /* WaMediaPoolStateCmdInWABB:bxt */
1054 if (HAS_POOLED_EU(engine->i915)) {
 * EU pool configuration is set up along with the golden context
1057 * during context initialization. This value depends on
1058 * device type (2x6 or 3x6) and needs to be updated based
1059 * on which subslice is disabled especially for 2x6
1060 * devices, however it is safe to load default
1061 * configuration of 3x6 device instead of masking off
1062 * corresponding bits because HW ignores bits of a disabled
1063 * subslice and drops down to appropriate config. Please
1064 * see render_state_setup() in i915_gem_render_state.c for
1065 * possible configurations, to avoid duplication they are
1066 * not shown here again.
1068 u32 eu_pool_config = 0x00777000;
1069 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
1070 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
1071 wa_ctx_emit(batch, index, eu_pool_config);
1072 wa_ctx_emit(batch, index, 0);
1073 wa_ctx_emit(batch, index, 0);
1074 wa_ctx_emit(batch, index, 0);
1077 /* Pad to end of cacheline */
1078 while (index % CACHELINE_DWORDS)
1079 wa_ctx_emit(batch, index, MI_NOOP);
1081 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1084 static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1085 struct i915_wa_ctx_bb *wa_ctx,
1089 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1091 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
1092 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
1093 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
1094 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1095 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
1096 wa_ctx_emit(batch, index,
1097 _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
1098 wa_ctx_emit(batch, index, MI_NOOP);
1101 /* WaClearTdlStateAckDirtyBits:bxt */
1102 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
1103 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
1105 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
1106 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1108 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
1109 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1111 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
1112 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1114 wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
1115 /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
1116 wa_ctx_emit(batch, index, 0x0);
1117 wa_ctx_emit(batch, index, MI_NOOP);
1120 /* WaDisableCtxRestoreArbitration:skl,bxt */
1121 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1122 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1123 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1125 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1127 return wa_ctx_end(wa_ctx, *offset = index, 1);
1130 static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1132 struct drm_i915_gem_object *obj;
1133 struct i915_vma *vma;
1136 obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size));
1138 return PTR_ERR(obj);
1140 vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
1146 err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH);
1150 engine->wa_ctx.vma = vma;
1154 i915_gem_object_put(obj);
1158 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
1160 i915_vma_unpin_and_release(&engine->wa_ctx.vma);
1163 static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1165 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1171 WARN_ON(engine->id != RCS);
1173 /* update this when WA for higher Gen are added */
1174 if (INTEL_GEN(engine->i915) > 9) {
1175 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1176 INTEL_GEN(engine->i915));
1180 /* some WA perform writes to scratch page, ensure it is valid */
1181 if (!engine->scratch) {
1182 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
1186 ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
1188 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1192 page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
1193 batch = kmap_atomic(page);
1196 if (IS_GEN8(engine->i915)) {
1197 ret = gen8_init_indirectctx_bb(engine,
1198 &wa_ctx->indirect_ctx,
1204 ret = gen8_init_perctx_bb(engine,
1210 } else if (IS_GEN9(engine->i915)) {
1211 ret = gen9_init_indirectctx_bb(engine,
1212 &wa_ctx->indirect_ctx,
1218 ret = gen9_init_perctx_bb(engine,
1227 kunmap_atomic(batch);
1229 lrc_destroy_wa_ctx_obj(engine);
1234 static void lrc_init_hws(struct intel_engine_cs *engine)
1236 struct drm_i915_private *dev_priv = engine->i915;
1238 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1239 engine->status_page.ggtt_offset);
1240 POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1243 static int gen8_init_common_ring(struct intel_engine_cs *engine)
1245 struct drm_i915_private *dev_priv = engine->i915;
1248 ret = intel_mocs_init_engine(engine);
1252 lrc_init_hws(engine);
1254 intel_engine_reset_breadcrumbs(engine);
1256 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
1258 I915_WRITE(RING_MODE_GEN7(engine),
1259 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1260 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1262 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
1264 intel_engine_init_hangcheck(engine);
1266 if (!execlists_elsp_idle(engine))
1267 execlists_submit_ports(engine);
1272 static int gen8_init_render_ring(struct intel_engine_cs *engine)
1274 struct drm_i915_private *dev_priv = engine->i915;
1277 ret = gen8_init_common_ring(engine);
1281 /* We need to disable the AsyncFlip performance optimisations in order
1282 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1283 * programmed to '1' on all products.
1285 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1287 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1289 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1291 return init_workarounds_ring(engine);
1294 static int gen9_init_render_ring(struct intel_engine_cs *engine)
1298 ret = gen8_init_common_ring(engine);
1302 return init_workarounds_ring(engine);
1305 static void reset_common_ring(struct intel_engine_cs *engine,
1306 struct drm_i915_gem_request *request)
1308 struct drm_i915_private *dev_priv = engine->i915;
1309 struct execlist_port *port = engine->execlist_port;
1310 struct intel_context *ce = &request->ctx->engine[engine->id];
1312 /* We want a simple context + ring to execute the breadcrumb update.
1313 * We cannot rely on the context being intact across the GPU hang,
1314 * so clear it and rebuild just what we need for the breadcrumb.
1315 * All pending requests for this context will be zapped, and any
1316 * future request will be after userspace has had the opportunity
1317 * to recreate its own state.
1319 execlists_init_reg_state(ce->lrc_reg_state,
1320 request->ctx, engine, ce->ring);
1322 /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
1323 ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
1324 i915_ggtt_offset(ce->ring->vma);
1325 ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
1327 request->ring->head = request->postfix;
1328 request->ring->last_retired_head = -1;
1329 intel_ring_update_space(request->ring);
1331 if (i915.enable_guc_submission)
1334 /* Catch up with any missed context-switch interrupts */
1335 I915_WRITE(RING_CONTEXT_STATUS_PTR(engine), _MASKED_FIELD(0xffff, 0));
1336 if (request->ctx != port[0].request->ctx) {
1337 i915_gem_request_put(port[0].request);
1339 memset(&port[1], 0, sizeof(port[1]));
1342 /* CS is stopped, and we will resubmit both ports on resume */
1343 GEM_BUG_ON(request->ctx != port[0].request->ctx);
1347 /* Reset WaIdleLiteRestore:bdw,skl as well */
1348 request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
1351 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1353 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
1354 struct intel_ring *ring = req->ring;
1355 struct intel_engine_cs *engine = req->engine;
1356 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1359 ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
1363 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
1364 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1365 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1367 intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
1368 intel_ring_emit(ring, upper_32_bits(pd_daddr));
1369 intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
1370 intel_ring_emit(ring, lower_32_bits(pd_daddr));
1373 intel_ring_emit(ring, MI_NOOP);
1374 intel_ring_advance(ring);
1379 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1380 u64 offset, u32 len,
1381 unsigned int dispatch_flags)
1383 struct intel_ring *ring = req->ring;
1384 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
1387 /* Don't rely in hw updating PDPs, specially in lite-restore.
1388 * Ideally, we should set Force PD Restore in ctx descriptor,
1389 * but we can't. Force Restore would be a second option, but
1390 * it is unsafe in case of lite-restore (because the ctx is
1391 * not idle). PML4 is allocated during ppgtt init so this is
1392 * not needed in 48-bit.*/
1393 if (req->ctx->ppgtt &&
1394 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
1395 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
1396 !intel_vgpu_active(req->i915)) {
1397 ret = intel_logical_ring_emit_pdps(req);
1402 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
1405 ret = intel_ring_begin(req, 4);
1409 /* FIXME(BDW): Address space and security selectors. */
1410 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
1412 (dispatch_flags & I915_DISPATCH_RS ?
1413 MI_BATCH_RESOURCE_STREAMER : 0));
1414 intel_ring_emit(ring, lower_32_bits(offset));
1415 intel_ring_emit(ring, upper_32_bits(offset));
1416 intel_ring_emit(ring, MI_NOOP);
1417 intel_ring_advance(ring);
1422 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
1424 struct drm_i915_private *dev_priv = engine->i915;
1425 I915_WRITE_IMR(engine,
1426 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1427 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1430 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
1432 struct drm_i915_private *dev_priv = engine->i915;
1433 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1436 static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
1438 struct intel_ring *ring = request->ring;
1442 ret = intel_ring_begin(request, 4);
1446 cmd = MI_FLUSH_DW + 1;
1448 /* We always require a command barrier so that subsequent
1449 * commands, such as breadcrumb interrupts, are strictly ordered
1450 * wrt the contents of the write cache being flushed to memory
1451 * (and thus being coherent from the CPU).
1453 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1455 if (mode & EMIT_INVALIDATE) {
1456 cmd |= MI_INVALIDATE_TLB;
1457 if (request->engine->id == VCS)
1458 cmd |= MI_INVALIDATE_BSD;
1461 intel_ring_emit(ring, cmd);
1462 intel_ring_emit(ring,
1463 I915_GEM_HWS_SCRATCH_ADDR |
1464 MI_FLUSH_DW_USE_GTT);
1465 intel_ring_emit(ring, 0); /* upper addr */
1466 intel_ring_emit(ring, 0); /* value */
1467 intel_ring_advance(ring);
1472 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1475 struct intel_ring *ring = request->ring;
1476 struct intel_engine_cs *engine = request->engine;
1478 i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
1479 bool vf_flush_wa = false, dc_flush_wa = false;
1484 flags |= PIPE_CONTROL_CS_STALL;
1486 if (mode & EMIT_FLUSH) {
1487 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1488 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1489 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
1490 flags |= PIPE_CONTROL_FLUSH_ENABLE;
1493 if (mode & EMIT_INVALIDATE) {
1494 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1495 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1496 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1497 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1498 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1499 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1500 flags |= PIPE_CONTROL_QW_WRITE;
1501 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
/* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
 * pipe control.
 */
if (IS_GEN9(request->i915))
	vf_flush_wa = true;

/* WaForGAMHang:kbl */
if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
	dc_flush_wa = true;
1523 ret = intel_ring_begin(request, len);
1528 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1529 intel_ring_emit(ring, 0);
1530 intel_ring_emit(ring, 0);
1531 intel_ring_emit(ring, 0);
1532 intel_ring_emit(ring, 0);
1533 intel_ring_emit(ring, 0);
1537 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1538 intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
1539 intel_ring_emit(ring, 0);
1540 intel_ring_emit(ring, 0);
1541 intel_ring_emit(ring, 0);
1542 intel_ring_emit(ring, 0);
1545 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1546 intel_ring_emit(ring, flags);
1547 intel_ring_emit(ring, scratch_addr);
1548 intel_ring_emit(ring, 0);
1549 intel_ring_emit(ring, 0);
1550 intel_ring_emit(ring, 0);
1553 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1554 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
1555 intel_ring_emit(ring, 0);
1556 intel_ring_emit(ring, 0);
1557 intel_ring_emit(ring, 0);
1558 intel_ring_emit(ring, 0);
1561 intel_ring_advance(ring);
1566 static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
1569 * On BXT A steppings there is a HW coherency issue whereby the
1570 * MI_STORE_DATA_IMM storing the completed request's seqno
1571 * occasionally doesn't invalidate the CPU cache. Work around this by
1572 * clflushing the corresponding cacheline whenever the caller wants
1573 * the coherency to be guaranteed. Note that this cacheline is known
1574 * to be clean at this point, since we only write it in
1575 * bxt_a_set_seqno(), where we also do a clflush after the write. So
1576 * this clflush in practice becomes an invalidate operation.
1578 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1582 * Reserve space for 2 NOOPs at the end of each request to be
1583 * used as a workaround for not being allowed to do lite
1584 * restore with HEAD==TAIL (WaIdleLiteRestore).
1587 static int gen8_emit_request(struct drm_i915_gem_request *request)
1589 struct intel_ring *ring = request->ring;
1592 ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
1596 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1597 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
1599 intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1600 intel_ring_emit(ring,
1601 intel_hws_seqno_address(request->engine) |
1602 MI_FLUSH_DW_USE_GTT);
1603 intel_ring_emit(ring, 0);
1604 intel_ring_emit(ring, request->fence.seqno);
1605 intel_ring_emit(ring, MI_USER_INTERRUPT);
1606 intel_ring_emit(ring, MI_NOOP);
1607 return intel_logical_ring_advance(request);
1610 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1612 struct intel_ring *ring = request->ring;
1615 ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
1619 /* We're using qword write, seqno should be aligned to 8 bytes. */
1620 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1622 /* w/a for post sync ops following a GPGPU operation we
1623 * need a prior CS_STALL, which is emitted by the flush
1624 * following the batch.
1626 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1627 intel_ring_emit(ring,
1628 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1629 PIPE_CONTROL_CS_STALL |
1630 PIPE_CONTROL_QW_WRITE));
1631 intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
1632 intel_ring_emit(ring, 0);
1633 intel_ring_emit(ring, i915_gem_request_get_seqno(request));
1634 /* We're thrashing one dword of HWS. */
1635 intel_ring_emit(ring, 0);
1636 intel_ring_emit(ring, MI_USER_INTERRUPT);
1637 intel_ring_emit(ring, MI_NOOP);
1638 return intel_logical_ring_advance(request);
1641 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
1645 ret = intel_logical_ring_workarounds_emit(req);
1649 ret = intel_rcs_context_init_mocs(req);
/*
 * Failing to program the MOCS is non-fatal. The system will not
 * run at peak performance, so generate an error and carry on.
 */
if (ret)
1655 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1657 return i915_gem_render_state_init(req);
1661 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1662 * @engine: Engine Command Streamer.
1664 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1666 struct drm_i915_private *dev_priv;
1668 if (!intel_engine_initialized(engine))
 * Tasklet cannot be active at this point due to intel_mark_active/idle
1673 * so this is just for documentation.
1675 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1676 tasklet_kill(&engine->irq_tasklet);
1678 dev_priv = engine->i915;
1680 if (engine->buffer) {
1681 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
1684 if (engine->cleanup)
1685 engine->cleanup(engine);
1687 intel_engine_cleanup_common(engine);
1689 if (engine->status_page.vma) {
1690 i915_gem_object_unpin_map(engine->status_page.vma->obj);
1691 engine->status_page.vma = NULL;
1693 intel_lr_context_unpin(dev_priv->kernel_context, engine);
1695 lrc_destroy_wa_ctx_obj(engine);
1696 engine->i915 = NULL;
1699 void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
1701 struct intel_engine_cs *engine;
1703 for_each_engine(engine, dev_priv)
1704 engine->submit_request = execlists_submit_request;
1708 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
/* Default vfuncs which can be overridden by each engine. */
1711 engine->init_hw = gen8_init_common_ring;
1712 engine->reset_hw = reset_common_ring;
1713 engine->emit_flush = gen8_emit_flush;
1714 engine->emit_request = gen8_emit_request;
1715 engine->submit_request = execlists_submit_request;
1717 engine->irq_enable = gen8_logical_ring_enable_irq;
1718 engine->irq_disable = gen8_logical_ring_disable_irq;
1719 engine->emit_bb_start = gen8_emit_bb_start;
1720 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1721 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
1725 logical_ring_default_irqs(struct intel_engine_cs *engine)
1727 unsigned shift = engine->irq_shift;
1728 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1729 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
1733 lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
1735 const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;
1738 /* The HWSP is part of the default context object in LRC mode. */
1739 hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
1741 return PTR_ERR(hws);
1743 engine->status_page.page_addr = hws + hws_offset;
1744 engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset;
1745 engine->status_page.vma = vma;
1751 logical_ring_setup(struct intel_engine_cs *engine)
1753 struct drm_i915_private *dev_priv = engine->i915;
1754 enum forcewake_domains fw_domains;
1756 intel_engine_setup_common(engine);
1758 /* Intentionally left blank. */
1759 engine->buffer = NULL;
1761 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
1765 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1766 RING_CONTEXT_STATUS_PTR(engine),
1767 FW_REG_READ | FW_REG_WRITE);
1769 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1770 RING_CONTEXT_STATUS_BUF_BASE(engine),
1773 engine->fw_domains = fw_domains;
1775 tasklet_init(&engine->irq_tasklet,
1776 intel_lrc_irq_handler, (unsigned long)engine);
1778 logical_ring_init_platform_invariants(engine);
1779 logical_ring_default_vfuncs(engine);
1780 logical_ring_default_irqs(engine);
1784 logical_ring_init(struct intel_engine_cs *engine)
1786 struct i915_gem_context *dctx = engine->i915->kernel_context;
1789 ret = intel_engine_init_common(engine);
1793 ret = execlists_context_deferred_alloc(dctx, engine);
1797 /* As this is the default context, always pin it */
1798 ret = intel_lr_context_pin(dctx, engine);
1800 DRM_ERROR("Failed to pin context for %s: %d\n",
1805 /* And setup the hardware status page. */
1806 ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
1808 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
1815 intel_logical_ring_cleanup(engine);
1819 int logical_render_ring_init(struct intel_engine_cs *engine)
1821 struct drm_i915_private *dev_priv = engine->i915;
1824 logical_ring_setup(engine);
1826 if (HAS_L3_DPF(dev_priv))
1827 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1829 /* Override some for render ring. */
1830 if (INTEL_GEN(dev_priv) >= 9)
1831 engine->init_hw = gen9_init_render_ring;
1833 engine->init_hw = gen8_init_render_ring;
1834 engine->init_context = gen8_init_rcs_context;
1835 engine->emit_flush = gen8_emit_flush_render;
1836 engine->emit_request = gen8_emit_request_render;
1838 ret = intel_engine_create_scratch(engine, 4096);
1842 ret = intel_init_workaround_bb(engine);
1845 * We continue even if we fail to initialize WA batch
1846 * because we only expect rare glitches but nothing
1847 * critical to prevent us from using GPU
1849 DRM_ERROR("WA batch buffer initialization failed: %d\n",
1853 ret = logical_ring_init(engine);
1855 lrc_destroy_wa_ctx_obj(engine);
1861 int logical_xcs_ring_init(struct intel_engine_cs *engine)
1863 logical_ring_setup(engine);
1865 return logical_ring_init(engine);
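/* Build the R_PWR_CLK_STATE value: from Gen9 onwards an explicit
 * slice/subslice/EU count request is needed to undo render power gating.
 */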
1869 make_rpcs(struct drm_i915_private *dev_priv)
1874 * No explicit RPCS request is needed to ensure full
1875 * slice/subslice/EU enablement prior to Gen9.
1877 if (INTEL_GEN(dev_priv) < 9)
 * Starting in Gen9, render power gating can leave
 * slice/subslice/EU in a partially enabled state. We
 * must make an explicit request through RPCS for full
 * enablement.
 */
1886 if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
1887 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
1888 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
1889 GEN8_RPCS_S_CNT_SHIFT;
1890 rpcs |= GEN8_RPCS_ENABLE;
1893 if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
1894 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
1895 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
1896 GEN8_RPCS_SS_CNT_SHIFT;
1897 rpcs |= GEN8_RPCS_ENABLE;
1900 if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
1901 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
1902 GEN8_RPCS_EU_MIN_SHIFT;
1903 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
1904 GEN8_RPCS_EU_MAX_SHIFT;
1905 rpcs |= GEN8_RPCS_ENABLE;
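/* Per-gen default for the RCS indirect context offset field; the caller
 * shifts it into place when writing CTX_RCS_INDIRECT_CTX_OFFSET.
 */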
1911 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
1913 u32 indirect_ctx_offset;
	switch (INTEL_GEN(engine->i915)) {
	default:
		MISSING_CASE(INTEL_GEN(engine->i915));
		/* fall through */
	case 9:
		indirect_ctx_offset = GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	case 8:
		indirect_ctx_offset = GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	}

	return indirect_ctx_offset;
1932 static void execlists_init_reg_state(u32 *reg_state,
1933 struct i915_gem_context *ctx,
1934 struct intel_engine_cs *engine,
1935 struct intel_ring *ring)
1937 struct drm_i915_private *dev_priv = engine->i915;
1938 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
1940 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
1941 * commands followed by (reg, value) pairs. The values we are setting here are
1942 * only for the first context restore: on a subsequent save, the GPU will
1943 * recreate this batchbuffer with new values (including all the missing
1944 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
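	/*
	 * In other words, the register state written below looks roughly like
	 *
	 *	[CTX_LRI_HEADER_0]  MI_LOAD_REGISTER_IMM(N)
	 *	[reg offset][value]
	 *	[reg offset][value]
	 *	...
	 *
	 * and each ASSIGN_CTX_REG(reg_state, pos, reg, val) stores the MMIO
	 * offset of @reg at reg_state[pos] and @val at reg_state[pos + 1],
	 * which is why the raw accesses further down use the "+1" slot for
	 * values. The render engine carries three extra pairs under the first
	 * header (BB_PER_CTX_PTR plus the INDIRECT_CTX pointer and offset),
	 * hence 14 registers for RCS versus 11 for the other engines.
	 */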
1945 reg_state[CTX_LRI_HEADER_0] =
1946 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
1947 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
1948 RING_CONTEXT_CONTROL(engine),
1949 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
1950 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
1951 (HAS_RESOURCE_STREAMER(dev_priv) ?
1952 CTX_CTRL_RS_CTX_ENABLE : 0)));
	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
		       0);
1957 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
1958 RING_START(engine->mmio_base), 0);
1959 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
1960 RING_CTL(engine->mmio_base),
1961 ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
1962 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
1963 RING_BBADDR_UDW(engine->mmio_base), 0);
1964 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
1965 RING_BBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
		       RING_BBSTATE(engine->mmio_base),
		       RING_BB_PPGTT);
1969 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
1970 RING_SBBADDR_UDW(engine->mmio_base), 0);
1971 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
1972 RING_SBBADDR(engine->mmio_base), 0);
1973 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
1974 RING_SBBSTATE(engine->mmio_base), 0);
1975 if (engine->id == RCS) {
1976 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
1977 RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
1978 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
1979 RING_INDIRECT_CTX(engine->mmio_base), 0);
1980 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
1981 RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
1982 if (engine->wa_ctx.vma) {
1983 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1984 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
1986 reg_state[CTX_RCS_INDIRECT_CTX+1] =
1987 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
1988 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
1990 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
1991 intel_lr_indirect_ctx_offset(engine) << 6;
			reg_state[CTX_BB_PER_CTX_PTR+1] =
				(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
				0x01;
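			/*
			 * Both pointers above fold extra information into the
			 * low bits of a cacheline-aligned GGTT address: the
			 * indirect-context entry carries the batch size in
			 * cachelines, and the per-context entry carries an
			 * enable bit (the 0x01). As a purely illustrative
			 * example, an indirect context batch two cachelines
			 * long placed at GGTT offset 0x10000 would be encoded
			 * as 0x10002.
			 */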
1998 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
1999 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
2000 RING_CTX_TIMESTAMP(engine->mmio_base), 0);
	/* PDP values will be assigned later if needed */
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3), 0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3), 0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2), 0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2), 0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1), 0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1), 0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
2019 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		/* 64b PPGTT (48bit canonical)
		 * PDP0_DESCRIPTOR contains the base address of the PML4 and the
		 * other PDP descriptors are ignored.
		 */
		ASSIGN_CTX_PML4(ppgtt, reg_state);
	} else {
		/* 32b PPGTT
		 * PDP*_DESCRIPTORS contain the base address of the space supported.
		 * With dynamic page allocation, PDPs may not be allocated at
		 * this point. Point the unallocated PDPs to the scratch page.
		 */
2031 execlists_update_context_pdps(ppgtt, reg_state);
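		/*
		 * A minimal sketch of what that update amounts to, assuming
		 * the usual four legacy PDP entries: for each page directory
		 * i, its dma address is split across the UDW/LDW pair that
		 * was zeroed above, roughly
		 *
		 *	u64 pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
		 *	reg_state[CTX_PDPi_UDW + 1] = upper_32_bits(pd_daddr);
		 *	reg_state[CTX_PDPi_LDW + 1] = lower_32_bits(pd_daddr);
		 *
		 * with not-yet-allocated directories pointing at the scratch
		 * page.
		 */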
2034 if (engine->id == RCS) {
2035 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2036 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2037 make_rpcs(dev_priv));
static int populate_lr_context(struct i915_gem_context *ctx,
2043 struct drm_i915_gem_object *ctx_obj,
2044 struct intel_engine_cs *engine,
2045 struct intel_ring *ring)
2050 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2052 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2056 vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
2057 if (IS_ERR(vaddr)) {
2058 ret = PTR_ERR(vaddr);
2059 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
2062 ctx_obj->dirty = true;
2064 /* The second page of the context object contains some fields which must
2065 * be set up prior to the first execution. */
	execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
				 ctx, engine, ring);
2070 i915_gem_object_unpin_map(ctx_obj);
2076 * intel_lr_context_size() - return the size of the context for an engine
2077 * @engine: which engine to find the context size for
2079 * Each engine may require a different amount of space for a context image,
2080 * so when allocating (or copying) an image, this function can be used to
2081 * find the right size for the specific engine.
2083 * Return: size (in bytes) of an engine-specific context image
2085 * Note: this size includes the HWSP, which is part of the context image
2086 * in LRC mode, but does not include the "shared data page" used with
2087 * GuC submission. The caller should account for this if using the GuC.
2089 uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
	WARN_ON(INTEL_GEN(engine->i915) < 8);
	switch (engine->id) {
	case RCS:
		if (INTEL_GEN(engine->i915) >= 9)
			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
		else
			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS: case BCS: case VECS: case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}
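	/*
	 * As a usage sketch, a caller sizing the backing object for a new
	 * context would typically do
	 *
	 *	context_size = round_up(intel_lr_context_size(engine), 4096);
	 *
	 * exactly as execlists_context_deferred_alloc() does below, adding
	 * any further pages (e.g. the GuC shared data page) on top.
	 */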
2113 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2114 struct intel_engine_cs *engine)
2116 struct drm_i915_gem_object *ctx_obj;
2117 struct intel_context *ce = &ctx->engine[engine->id];
2118 struct i915_vma *vma;
2119 uint32_t context_size;
2120 struct intel_ring *ring;
2125 context_size = round_up(intel_lr_context_size(engine), 4096);
	/* One extra page as shared data between the driver and GuC */
2128 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2130 ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
2131 if (IS_ERR(ctx_obj)) {
2132 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2133 return PTR_ERR(ctx_obj);
	vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto error_deref_obj;
	}
	ring = intel_engine_create_ring(engine, ctx->ring_size);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		goto error_deref_obj;
	}
2148 ret = populate_lr_context(ctx, ctx_obj, engine, ring);
2150 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
2151 goto error_ring_free;
2156 ce->initialised = engine->init_context == NULL;
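	/*
	 * If the engine provides an init_context hook (e.g. the render
	 * engine's golden state setup), the context is left marked as not
	 * initialised so that the first request submitted on it runs that
	 * hook; engines without one can use the freshly populated image
	 * as-is.
	 */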
2161 intel_ring_free(ring);
2163 i915_gem_object_put(ctx_obj);
2167 void intel_lr_context_resume(struct drm_i915_private *dev_priv)
2169 struct i915_gem_context *ctx = dev_priv->kernel_context;
2170 struct intel_engine_cs *engine;
	for_each_engine(engine, dev_priv) {
		struct intel_context *ce = &ctx->engine[engine->id];
		void *vaddr;
		uint32_t *reg_state;

		if (!ce->state)
			continue;
		vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
		if (WARN_ON(IS_ERR(vaddr)))
			continue;
2184 reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
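		/*
		 * Whatever ring position was saved in the image is stale
		 * after suspend, so restart the default context's ring from
		 * the beginning; marking the object dirty below ensures the
		 * zeroed HEAD/TAIL reach memory before the context is used
		 * again.
		 */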
2186 reg_state[CTX_RING_HEAD+1] = 0;
2187 reg_state[CTX_RING_TAIL+1] = 0;
2189 ce->state->obj->dirty = true;
2190 i915_gem_object_unpin_map(ce->state->obj);